source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
dpado.202001172040.local_minimum_reduction.h | //
// Created by Zhen Peng on 1/6/20.
//
#ifndef PADO_DPADO_H
#define PADO_DPADO_H
#include <vector>
//#include <unordered_map>
#include <map>
#include <algorithm>
#include <iostream>
#include <limits.h>
#include <cassert>  // assert
#include <cstdio>   // printf
#include <cstring>  // std::memcpy (MsgBPLabel)
#include <clocale>  // setlocale
//#include <xmmintrin.h>
#include <immintrin.h>
#include <bitset>
#include <math.h>
#include <fstream>
#include <omp.h>
#include "globals.h"
#include "dglobals.h"
#include "dgraph.h"
namespace PADO {
template <VertexID BATCH_SIZE = 1024>
class DistBVCPLL {
private:
static const VertexID BITPARALLEL_SIZE = 50;
const inti THRESHOLD_PARALLEL = 80;
// Structure for the type of label
// Per-vertex label index: fixed-size bit-parallel labels plus the normal
// labels, which are stored run-length-grouped by distance (each entry of
// `distances` describes one run of same-distance vertices inside `vertices`).
struct IndexType {
// struct Batch {
// VertexID batch_id; // Batch ID
// VertexID start_index; // Index to the array distances where the batch starts
// VertexID size; // Number of distances element in this batch
//
// Batch() = default;
// Batch(VertexID batch_id_, VertexID start_index_, VertexID size_):
// batch_id(batch_id_), start_index(start_index_), size(size_)
// { }
// };
// Run descriptor: vertices[start_index .. start_index + size) all lie at
// distance `dist` from the indexed vertex.
struct DistanceIndexType {
VertexID start_index; // Index to the array vertices where the same-distance vertices start
VertexID size; // Number of the same-distance vertices
UnweightedDist dist; // The real distance
DistanceIndexType() = default;
DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_):
start_index(start_index_), size(size_), dist(dist_)
{ }
};
// Bit-parallel Labels
UnweightedDist bp_dist[BITPARALLEL_SIZE];
uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0}
// std::vector<Batch> batches; // Batch info
std::vector<DistanceIndexType> distances; // Distance info
std::vector<VertexID> vertices; // Vertices in the label, presented as temporary ID
// Memory footprint of this entry in bytes: the fixed bit-parallel arrays
// plus the payload of the two dynamic vectors (capacity overhead ignored).
size_t get_size_in_bytes() const
{
return sizeof(bp_dist) +
sizeof(bp_sets) +
// batches.size() * sizeof(Batch) +
distances.size() * sizeof(DistanceIndexType) +
vertices.size() * sizeof(VertexID);
}
// Release the dynamic label storage. Swapping with empty temporaries (rather
// than clear()) guarantees the capacity is actually deallocated.
void clean_all_indices()
{
std::vector<DistanceIndexType>().swap(distances);
std::vector<VertexID>().swap(vertices);
}
}; //__attribute__((aligned(64)));
// Per-vertex scratch state used while processing one batch of roots.
struct ShortIndex {
// I use BATCH_SIZE + 1 bit for indicator bit array.
// The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already.
// In this way, it helps update_label_indices() and can be reset along with other indicator elements.
// std::bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already
// If the Batch structure is not used, the indicator could just be BATCH_SIZE long.
std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE, 0);
// std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0);
// Use a queue to store candidates
std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE);
VertexID end_candidates_que = 0; // number of valid entries currently in candidates_que
std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0); // is_candidate[r] set means root r is currently in candidates_que
// Zero every indicator flag.
void indicator_reset()
{
std::fill(indicator.begin(), indicator.end(), 0);
}
}; //__attribute__((aligned(64)));
// Type of Bit-Parallel Label
// Zero-initialized, fixed-size bit-parallel label for one root
// (one distance and one pair of bit sets per bit-parallel root).
struct BPLabelType {
UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 };
uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0}
};
// Type of Label Message Unit, for initializing distance table
// One (root, label vertex, distance) triple exchanged between hosts.
struct LabelTableUnit {
VertexID root_id; // identifies the root this label belongs to
VertexID label_global_id; // global ID of the label vertex
UnweightedDist dist; // distance between root and label vertex
LabelTableUnit() = default;
LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
root_id(r), label_global_id(l), dist(d) {}
};
// Type of BitParallel Label Message Unit for initializing bit-parallel labels.
// Carries one root's complete bit-parallel label (distance array plus the
// S^{-1}/S^{0} bit sets) so it can be sent to other hosts.
struct MsgBPLabel {
    VertexID r_root_id;                        // root this label belongs to
    UnweightedDist bp_dist[BITPARALLEL_SIZE];  // per-bp-root distances
    uint64_t bp_sets[BITPARALLEL_SIZE][2];     // [0]: S^{-1}, [1]: S^{0}
    MsgBPLabel() = default;
    // Copies the fixed-size label arrays from the caller's buffers.
    MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
        : r_root_id(r)
    {
        // Qualified std::memcpy from <cstring>: the file previously relied on
        // a transitive include to provide unqualified memcpy.
        std::memcpy(bp_dist, dist, sizeof(bp_dist));
        std::memcpy(bp_sets, sets, sizeof(bp_sets));
    }
};
VertexID num_v = 0;
VertexID num_masters = 0;
// VertexID BATCH_SIZE = 0;
int host_id = 0;
int num_hosts = 0;
MPI_Datatype V_ID_Type;
std::vector<IndexType> L;
inline void bit_parallel_push_labels(
const DistGraph &G,
VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
std::vector<VertexID> &tmp_q,
VertexID &size_tmp_q,
std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
VertexID &size_tmp_sibling_es,
std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
VertexID &size_tmp_child_es,
const VertexID &offset_tmp_q,
std::vector<UnweightedDist> &dists,
UnweightedDist iter);
inline void bit_parallel_labeling(
const DistGraph &G,
std::vector<uint8_t> &used_bp_roots);
// inline void bit_parallel_push_labels(
// const DistGraph &G,
// VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
// std::vector<UnweightedDist> &dists,
// UnweightedDist iter);
// inline void bit_parallel_labeling(
// const DistGraph &G,
//// std::vector<IndexType> &L,
// std::vector<uint8_t> &used_bp_roots);
inline void batch_process(
const DistGraph &G,
// const VertexID b_id,
const VertexID roots_start,
const VertexID roots_size,
const std::vector<uint8_t> &used_bp_roots,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
// std::vector<bool> &got_candidates,
std::vector<uint8_t> &is_active,
// std::vector<bool> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated);
// std::vector<bool> &once_candidated);
inline VertexID initialization(
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// std::vector<bool> &once_candidated,
// VertexID b_id,
VertexID roots_start,
VertexID roots_size,
// std::vector<VertexID> &roots_master_local,
const std::vector<uint8_t> &used_bp_roots);
// inline void push_single_label(
// VertexID v_head_global,
// VertexID label_root_id,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter);
inline void schedule_label_pushing_para(
const DistGraph &G,
const VertexID roots_start,
const std::vector<uint8_t> &used_bp_roots,
const std::vector<VertexID> &active_queue,
const VertexID global_start,
const VertexID global_size,
const VertexID local_size,
// const VertexID start_active_queue,
// const VertexID size_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
const std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
std::vector<uint8_t> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const UnweightedDist iter);
inline void local_push_labels_seq(
VertexID v_head_global,
EdgeID start_index,
EdgeID bound_index,
VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<bool> &got_candidates,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// std::vector<bool> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter);
inline void local_push_labels_para(
const VertexID v_head_global,
const EdgeID start_index,
const EdgeID bound_index,
const VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
std::vector<VertexID> &tmp_got_candidates_queue,
VertexID &size_tmp_got_candidates_queue,
const VertexID offset_tmp_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
std::vector<VertexID> &tmp_once_candidated_queue,
VertexID &size_tmp_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter);
// inline void local_push_labels(
// VertexID v_head_local,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter);
inline bool distance_query(
VertexID cand_root_id,
VertexID v_id,
VertexID roots_start,
// const std::vector<IndexType> &L,
const std::vector< std::vector<UnweightedDist> > &dist_table,
UnweightedDist iter);
inline void insert_label_only_seq(
VertexID cand_root_id,
// VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::pair<VertexID, VertexID> > &buffer_send);
// UnweightedDist iter);
inline void insert_label_only_para(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send)
std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
EdgeID &size_tmp_buffer_send,
const EdgeID offset_tmp_buffer_send);
inline void update_label_indices(
const VertexID v_id,
const VertexID inserted_count,
// std::vector<IndexType> &L,
// std::vector<ShortIndex> &short_index,
// VertexID b_id,
const UnweightedDist iter);
inline void reset_at_end(
const DistGraph &G,
// VertexID roots_start,
// const std::vector<VertexID> &roots_master_local,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
const std::vector<VertexID> &once_candidated_queue,
const VertexID end_once_candidated_queue);
// template <typename E_T, typename F>
// inline void every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun);
template <typename E_T>
inline void one_host_bcasts_buffer_to_buffer(
int root,
std::vector<E_T> &buffer_send,
std::vector<E_T> &buffer_recv);
// // Function: get the destination host id which is i hop from this host.
// // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
// // -1 hop from host 0 is host 2.
// int hop_2_me_host_id(int hop) const
// {
// assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
// return (host_id + hop + num_hosts) % num_hosts;
// }
// // Function: get the destination host id which is i hop from the root.
// // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
// // -1 hop from host 0 is host 2.
// int hop_2_root_host_id(int hop, int root) const
// {
// assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
// assert(root >= 0 && root < num_hosts);
// return (root + hop + num_hosts) % num_hosts;
// }
size_t get_index_size()
{
size_t bytes = 0;
for (VertexID v_i = 0; v_i < num_masters; ++v_i) {
bytes += L[v_i].get_size_in_bytes();
}
return bytes;
}
// Test only
// uint64_t normal_hit_count = 0;
// uint64_t bp_hit_count = 0;
// uint64_t total_check_count = 0;
// uint64_t normal_check_count = 0;
// uint64_t total_candidates_num = 0;
// uint64_t set_candidates_num = 0;
// double initializing_time = 0;
// double candidating_time = 0;
// double adding_time = 0;
// double distance_query_time = 0;
// double init_index_time = 0;
// double init_dist_matrix_time = 0;
// double init_start_reset_time = 0;
// double init_indicators_time = 0;
//L2CacheMissRate cache_miss;
// double message_time = 0;
// double bp_labeling_time = 0;
// double initializing_time = 0;
// double scatter_time = 0;
// double gather_time = 0;
// double clearup_time = 0;
// TotalInstructsExe candidating_ins_count;
// TotalInstructsExe adding_ins_count;
// TotalInstructsExe bp_labeling_ins_count;
// TotalInstructsExe bp_checking_ins_count;
// TotalInstructsExe dist_query_ins_count;
// End test
public:
// std::pair<uint64_t, uint64_t> length_larger_than_16 = std::make_pair(0, 0);
DistBVCPLL() = default;
explicit DistBVCPLL(
const DistGraph &G);
// UnweightedDist dist_distance_query_pair(
// VertexID a_global,
// VertexID b_global,
// const DistGraph &G);
}; // class DistBVCPLL
// Constructor: builds the full distributed labeling for graph G.
// Phase 1 constructs the bit-parallel labels; phase 2 runs the batched
// pruned BFS (batch_process) over all vertices in batches of BATCH_SIZE;
// finally label-count and timing statistics are reduced over MPI and
// printed by host 0.
template <VertexID BATCH_SIZE>
DistBVCPLL<BATCH_SIZE>::
DistBVCPLL(
        const DistGraph &G)
{
    // Copy basic graph / MPI metadata.
    num_v = G.num_v;
    assert(num_v >= BATCH_SIZE);
    num_masters = G.num_masters;
    host_id = G.host_id;
    num_hosts = G.num_hosts;
    V_ID_Type = G.V_ID_Type;
    L.resize(num_masters); // one label index per master vertex on this host
    VertexID remainder = num_v % BATCH_SIZE; // size of the final partial batch (0 if none)
    VertexID b_i_bound = num_v / BATCH_SIZE; // number of full batches
    std::vector<uint8_t> used_bp_roots(num_v, 0); // vertices consumed as bit-parallel roots
    double time_labeling = -WallTimer::get_time_mark();
    // Phase 1: bit-parallel labeling (also fills used_bp_roots).
    bit_parallel_labeling(G,
            used_bp_roots);
    if (0 == host_id) {
        // %d: host_id is an int (the original %u was a format mismatch).
        printf("host_id: %d bp_labeling_finished.\n", host_id);
    }
    // Phase 2 working structures, allocated once and reused across batches.
    std::vector<VertexID> active_queue(num_masters); // Any vertex v who is active should be put into this queue.
    VertexID end_active_queue = 0;
    std::vector<uint8_t> is_active(num_masters, false); // is_active[v] is true means vertex v is in the active queue.
    std::vector<VertexID> got_candidates_queue(num_masters); // Any vertex v who got candidates should be put into this queue.
    VertexID end_got_candidates_queue = 0;
    std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
    std::vector<ShortIndex> short_index(num_masters);
    std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST));
    std::vector<VertexID> once_candidated_queue(num_masters); // if short_index[v].indicator.any() is true, v is in the queue.
            // Used mainly for resetting short_index[v].indicator.
    VertexID end_once_candidated_queue = 0;
    std::vector<uint8_t> once_candidated(num_masters, false);
    std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE); // Some distances are from other hosts. This is used to reset the dist_table.
    std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // All roots' bit-parallel labels
    // Process all full batches.
    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
        if (0 == host_id) {
            printf("b_i: %u\n", b_i);//test
        }
        batch_process(
                G,
                b_i * BATCH_SIZE,
                BATCH_SIZE,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
    }
    // Process the trailing partial batch, if any.
    if (remainder != 0) {
        if (0 == host_id) {
            printf("b_i: %u\n", b_i_bound);//test
        }
        batch_process(
                G,
                b_i_bound * BATCH_SIZE,
                remainder,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
    }
    time_labeling += WallTimer::get_time_mark();
    // --- Statistics (printed by host 0 only) ---
    setlocale(LC_NUMERIC, "");
    if (0 == host_id) {
        printf("BATCH_SIZE: %u\n", BATCH_SIZE);
        printf("BP_Size: %u\n", BITPARALLEL_SIZE);
    }
    {// Total Number of Labels
        EdgeID local_num_labels = 0;
        for (VertexID v_global = 0; v_global < num_v; ++v_global) {
            if (G.get_master_host_id(v_global) != host_id) {
                continue; // count only vertices mastered on this host
            }
            local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size();
        }
        EdgeID global_num_labels;
        MPI_Allreduce(&local_num_labels,
                &global_num_labels,
                1,
                MPI_Instance::get_mpi_datatype<EdgeID>(),
                MPI_SUM,
                MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
        if (0 == host_id) {
            printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v);
        }
    }
    // Global labeling time is the maximum over all hosts.
    double global_time_labeling;
    MPI_Allreduce(&time_labeling,
            &global_time_labeling,
            1,
            MPI_DOUBLE,
            MPI_MAX,
            MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    if (0 == host_id) {
        printf("num_hosts: %d "
               "Global_labeling_time: %.2f seconds\n",
                num_hosts,
                global_time_labeling);
    }
    // End test
}
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_labeling(
// const DistGraph &G,
// std::vector<uint8_t> &used_bp_roots)
//{
//// VertexID num_v = G.num_v;
// EdgeID num_e = G.num_e;
//
// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
// std::vector<VertexID> que(num_v); // active queue
// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
//
// VertexID r = 0; // root r
// for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// while (r < num_v && used_bp_roots[r]) {
// ++r;
// }
// if (r == num_v) {
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
// }
// continue;
// }
// used_bp_roots[r] = true;
//
// fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
// fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
//
// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
// que[que_h++] = r;
// tmp_d[r] = 0;
// que_t1 = que_h;
//
// int ns = 0; // number of selected neighbor, default 64
// // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward
// // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF.
//// VertexID i_bound = G.vertices[r] - 1;
//// VertexID i_start = i_bound + G.out_degrees[r];
//// for (VertexID i = i_start; i > i_bound; --i) {
// //int i_bound = G.vertices[r];
// //int i_start = i_bound + G.out_degrees[r] - 1;
// //for (int i = i_start; i >= i_bound; --i) {
// VertexID d_i_bound = G.local_out_degrees[r];
// EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1;
// for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) {
// EdgeID i = i_start - d_i;
// VertexID v = G.out_edges[i];
// if (!used_bp_roots[v]) {
// used_bp_roots[v] = true;
// // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set)
// que[que_h++] = v;
// tmp_d[v] = 1;
// tmp_s[v].first = 1ULL << ns;
// if (++ns == 64) break;
// }
// }
// //}
//// }
//
// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
// VertexID num_sibling_es = 0, num_child_es = 0;
//
// for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) {
// VertexID v = que[que_i];
//// bit_parallel_push_labels(G,
//// v,
//// que,
//// que_h,
//// sibling_es,
//// num_sibling_es,
//// child_es,
//// num_child_es,
//// tmp_d,
//// d);
// EdgeID i_start = G.vertices_idx[v];
// EdgeID i_bound = i_start + G.local_out_degrees[v];
// for (EdgeID i = i_start; i < i_bound; ++i) {
// VertexID tv = G.out_edges[i];
// UnweightedDist td = d + 1;
//
// if (d > tmp_d[tv]) {
// ;
// }
// else if (d == tmp_d[tv]) {
// if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph.
// sibling_es[num_sibling_es].first = v;
// sibling_es[num_sibling_es].second = tv;
// ++num_sibling_es;
// }
// } else { // d < tmp_d[tv]
// if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) {
// que[que_h++] = tv;
// tmp_d[tv] = td;
// }
// child_es[num_child_es].first = v;
// child_es[num_child_es].second = tv;
// ++num_child_es;
// }
// }
// }
//
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first, w = sibling_es[i].second;
// tmp_s[v].second |= tmp_s[w].first;
// tmp_s[w].second |= tmp_s[v].first;
// }
// for (VertexID i = 0; i < num_child_es; ++i) {
// VertexID v = child_es[i].first, c = child_es[i].second;
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
// }
//
// {// test
// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
//// if (4 == d) {
//// exit(EXIT_SUCCESS);
//// }
// }
//
// que_t0 = que_t1;
// que_t1 = que_h;
// }
//
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = tmp_d[v];
// L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1}
// L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
// }
// }
//
//}
// One expansion step of the bit-parallel BFS from v_global: scans v_global's
// local out-edges and classifies each neighbor tv by comparing the current
// BFS level `iter` with dists[tv]:
//   iter >  dists[tv] : already settled at a closer level -> skip;
//   iter == dists[tv] : sibling edge (same level, recorded once via v < tv);
//   iter <  dists[tv] : child edge; an undiscovered tv is claimed with a CAS
//                       on dists[] so only one thread enqueues it.
// Results go into per-thread temporary buffers (tmp_q, tmp_sibling_es,
// tmp_child_es), all written starting at offset_tmp_q with their own running
// size counters; the caller merges these afterwards.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_push_labels(
const DistGraph &G,
const VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
std::vector<VertexID> &tmp_q,
VertexID &size_tmp_q,
std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
VertexID &size_tmp_sibling_es,
std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
VertexID &size_tmp_child_es,
const VertexID &offset_tmp_q,
std::vector<UnweightedDist> &dists,
const UnweightedDist iter)
{
// Range of v_global's locally stored out-edges.
EdgeID i_start = G.vertices_idx[v_global];
EdgeID i_bound = i_start + G.local_out_degrees[v_global];
// {//test
// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
// }
for (EdgeID i = i_start; i < i_bound; ++i) {
VertexID tv_global = G.out_edges[i];
VertexID tv_local = G.get_local_vertex_id(tv_global);
UnweightedDist td = iter + 1; // candidate distance for a newly discovered child
if (iter > dists[tv_local]) {
; // tv already settled at a smaller distance; nothing to do
} else if (iter == dists[tv_local]) {
// Sibling edge: record each undirected pair only once (v < tv).
if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph.
tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global;
tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global;
++size_tmp_sibling_es;
// sibling_es[num_sibling_es].first = v_global;
// sibling_es[num_sibling_es].second = tv_global;
// ++num_sibling_es;
}
} else { // iter < dists[tv]
// Child edge. If tv is undiscovered, try to claim it atomically; the
// winning thread enqueues tv into its temporary queue segment.
if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) {
tmp_q[offset_tmp_q + size_tmp_q++] = tv_global;
}
}
// if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
// tmp_que[end_tmp_que++] = tv_global;
// dists[tv_local] = td;
// }
// The child edge is recorded regardless of which thread discovered tv.
tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global;
tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global;
++size_tmp_child_es;
// child_es[num_child_es].first = v_global;
// child_es[num_child_es].second = tv_global;
// ++num_child_es;
}
}
}
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_labeling(
const DistGraph &G,
// std::vector<IndexType> &L,
std::vector<uint8_t> &used_bp_roots)
{
// Class type of Bit-Parallel label message unit.
struct MsgUnitBP {
VertexID v_global;
uint64_t S_n1;
uint64_t S_0;
MsgUnitBP() = default;
// MsgUnitBP(MsgUnitBP&& other) = default;
// MsgUnitBP(MsgUnitBP& other) = default;
// MsgUnitBP& operator=(const MsgUnitBP& other) = default;
// MsgUnitBP& operator=(MsgUnitBP&& other) = default;
MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
: v_global(v), S_n1(sn1), S_0(s0) { }
};
// VertexID num_v = G.num_v;
// EdgeID num_e = G.num_e;
EdgeID local_num_edges = G.num_edges_local;
std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v
std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
std::vector<VertexID> que(num_masters); // active queue
VertexID end_que = 0;
std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que
VertexID end_tmp_que = 0;
std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0)
std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1.
VertexID r_global = 0; // root r
for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// {// test
// if (0 == host_id) {
// printf("i_bpsp: %u\n", i_bpspt);
// }
// }
// Select the root r_global
if (0 == host_id) {
while (r_global < num_v && used_bp_roots[r_global]) {
++r_global;
}
if (r_global == num_v) {
for (VertexID v = 0; v < num_v; ++v) {
L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
}
continue;
}
}
// Broadcast the r here.
// message_time -= WallTimer::get_time_mark();
MPI_Bcast(&r_global,
1,
V_ID_Type,
0,
MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
used_bp_roots[r_global] = 1;
//#ifdef DEBUG_MESSAGES_ON
// {//test
// if (0 == host_id) {
// printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt);
// }
// }
//#endif
// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
// Mark the r_global
if (G.get_master_host_id(r_global) == host_id) {
tmp_d[G.get_local_vertex_id(r_global)] = 0;
que[end_que++] = r_global;
}
// Select the r_global's 64 neighbors
{
// Get r_global's neighbors into buffer_send, rank from high to low.
VertexID local_degree = G.local_out_degrees[r_global];
std::vector<VertexID> buffer_send(local_degree);
if (local_degree) {
EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
EdgeID e_i = e_i_start - d_i;
buffer_send[d_i] = G.out_edges[e_i];
}
}
// Get selected neighbors (up to 64)
std::vector<VertexID> selected_nbrs;
if (0 != host_id) {
// Every host other than 0 sends neighbors to host 0
// message_time -= WallTimer::get_time_mark();
MPI_Instance::send_buffer_2_dst(buffer_send,
0,
SENDING_ROOT_NEIGHBORS,
SENDING_SIZE_ROOT_NEIGHBORS);
// Receive selected neighbors from host 0
MPI_Instance::recv_buffer_from_src(selected_nbrs,
0,
SENDING_SELECTED_NEIGHBORS,
SENDING_SIZE_SELETED_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
} else {
// Host 0
// Host 0 receives neighbors from others
std::vector<VertexID> all_nbrs(buffer_send);
std::vector<VertexID > buffer_recv;
for (int loc = 0; loc < num_hosts - 1; ++loc) {
// message_time -= WallTimer::get_time_mark();
MPI_Instance::recv_buffer_from_any(buffer_recv,
SENDING_ROOT_NEIGHBORS,
SENDING_SIZE_ROOT_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
if (buffer_recv.empty()) {
continue;
}
buffer_send.resize(buffer_send.size() + buffer_recv.size());
std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
all_nbrs.resize(buffer_send.size());
all_nbrs.assign(buffer_send.begin(), buffer_send.end());
}
assert(all_nbrs.size() == G.get_global_out_degree(r_global));
// Select 64 (or less) neighbors
VertexID ns = 0; // number of selected neighbor, default 64
for (VertexID v_global : all_nbrs) {
if (used_bp_roots[v_global]) {
continue;
}
used_bp_roots[v_global] = 1;
selected_nbrs.push_back(v_global);
if (++ns == 64) {
break;
}
}
// Send selected neighbors to other hosts
// message_time -= WallTimer::get_time_mark();
for (int dest = 1; dest < num_hosts; ++dest) {
MPI_Instance::send_buffer_2_dst(selected_nbrs,
dest,
SENDING_SELECTED_NEIGHBORS,
SENDING_SIZE_SELETED_NEIGHBORS);
}
// message_time += WallTimer::get_time_mark();
}
// {//test
// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size());
// }
// Synchronize the used_bp_roots.
for (VertexID v_global : selected_nbrs) {
used_bp_roots[v_global] = 1;
}
// Mark selected neighbors
for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
VertexID v_global = selected_nbrs[v_i];
if (host_id != G.get_master_host_id(v_global)) {
continue;
}
tmp_que[end_tmp_que++] = v_global;
tmp_d[G.get_local_vertex_id(v_global)] = 1;
tmp_s[v_global].first = 1ULL << v_i;
}
}
// Reduce the global number of active vertices
VertexID global_num_actives = 1;
UnweightedDist d = 0;
while (global_num_actives) {
//#ifdef DEBUG_MESSAGES_ON
// {//test
// if (0 == host_id) {
// printf("d: %u que_size: %u\n", d, global_num_actives);
// }
// }
//#endif
// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
VertexID num_sibling_es = 0, num_child_es = 0;
// Send active masters to mirrors
{
std::vector<MsgUnitBP> buffer_send(end_que);
for (VertexID que_i = 0; que_i < end_que; ++que_i) {
VertexID v_global = que[que_i];
buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
}
// {// test
// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size());
// }
for (int root = 0; root < num_hosts; ++root) {
std::vector<MsgUnitBP> buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
// For parallel adding to queue
VertexID size_buffer_recv = buffer_recv.size();
std::vector<VertexID> offsets_tmp_q(size_buffer_recv);
#pragma omp parallel for
for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) {
offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global];
}
VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q);
std::vector<VertexID> tmp_q(num_neighbors);
std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0);
// For parallel adding to sibling_es
std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors);
std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0);
// For parallel adding to child_es
std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors);
std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0);
#pragma omp parallel for
// for (const MsgUnitBP &m : buffer_recv) {
for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
const MsgUnitBP &m = buffer_recv[i_m];
VertexID v_global = m.v_global;
if (!G.local_out_degrees[v_global]) {
continue;
}
tmp_s[v_global].first = m.S_n1;
tmp_s[v_global].second = m.S_0;
// Push labels
bit_parallel_push_labels(
G,
v_global,
tmp_q,
sizes_tmp_q[i_m],
tmp_sibling_es,
sizes_tmp_sibling_es[i_m],
tmp_child_es,
sizes_tmp_child_es[i_m],
offsets_tmp_q[i_m],
// tmp_que,
// end_tmp_que,
// sibling_es,
// num_sibling_es,
// child_es,
// num_child_es,
tmp_d,
d);
}
{// From tmp_sibling_es to sibling_es
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es);
PADO::collect_into_queue(
tmp_sibling_es,
offsets_tmp_q,
sizes_tmp_sibling_es,
total_size_tmp,
sibling_es,
num_sibling_es);
}
{// From tmp_child_es to child_es
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es);
PADO::collect_into_queue(
tmp_child_es,
offsets_tmp_q,
sizes_tmp_child_es,
total_size_tmp,
child_es,
num_child_es);
}
{// From tmp_q to tmp_que
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q);
PADO::collect_into_queue(
tmp_q,
offsets_tmp_q,
sizes_tmp_q,
total_size_tmp,
tmp_que,
end_tmp_que);
}
// {// test
// printf("host_id: %u root: %u done push.\n", host_id, root);
// }
}
}
// Update the sets in tmp_s
{
#pragma omp parallel for
for (VertexID i = 0; i < num_sibling_es; ++i) {
VertexID v = sibling_es[i].first, w = sibling_es[i].second;
__atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST);
__atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST);
// tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!!
// tmp_s[w].second |= tmp_s[v].first;
}
// Put into the buffer sending to others
std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
#pragma omp parallel for
for (VertexID i = 0; i < num_sibling_es; ++i) {
VertexID v = sibling_es[i].first;
VertexID w = sibling_es[i].second;
buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
}
// Send the messages
for (int root = 0; root < num_hosts; ++root) {
std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
size_t i_m_bound = buffer_recv.size();
#pragma omp parallel for
for (size_t i_m = 0; i_m < i_m_bound; ++i_m) {
const auto &m = buffer_recv[i_m];
__atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST);
}
// for (const std::pair<VertexID, uint64_t> &m : buffer_recv) {
// tmp_s[m.first].second |= m.second;
// }
}
#pragma omp parallel for
for (VertexID i = 0; i < num_child_es; ++i) {
VertexID v = child_es[i].first, c = child_es[i].second;
__atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST);
__atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST);
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
}
}
//#ifdef DEBUG_MESSAGES_ON
// {// test
// VertexID global_num_sibling_es;
// VertexID global_num_child_es;
// MPI_Allreduce(&num_sibling_es,
// &global_num_sibling_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// MPI_Allreduce(&num_child_es,
// &global_num_child_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// if (0 == host_id) {
// printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es);
// }
//
//// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
//// if (0 == d) {
//// exit(EXIT_SUCCESS);
//// }
// }
//#endif
// Swap que and tmp_que
tmp_que.swap(que);
end_que = end_tmp_que;
end_tmp_que = 0;
MPI_Allreduce(&end_que,
&global_num_actives,
1,
V_ID_Type,
MPI_MAX,
MPI_COMM_WORLD);
// }
++d;
}
#pragma omp parallel for
for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
VertexID v_global = G.get_global_vertex_id(v_local);
L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
}
}
}
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_push_labels(
// const DistGraph &G,
// const VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
// std::vector<UnweightedDist> &dists,
// const UnweightedDist iter)
//{
// EdgeID i_start = G.vertices_idx[v_global];
// EdgeID i_bound = i_start + G.local_out_degrees[v_global];
//// {//test
//// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
//// }
// for (EdgeID i = i_start; i < i_bound; ++i) {
// VertexID tv_global = G.out_edges[i];
// VertexID tv_local = G.get_local_vertex_id(tv_global);
// UnweightedDist td = iter + 1;
//
// if (iter > dists[tv_local]) {
// ;
// } else if (iter == dists[tv_local]) {
// if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph.
// sibling_es[num_sibling_es].first = v_global;
// sibling_es[num_sibling_es].second = tv_global;
// ++num_sibling_es;
// }
// } else { // iter < dists[tv]
// if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
// tmp_que[end_tmp_que++] = tv_global;
// dists[tv_local] = td;
// }
// child_es[num_child_es].first = v_global;
// child_es[num_child_es].second = tv_global;
// ++num_child_es;
//// {
//// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test
//// }
// }
// }
//
//}
//
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_labeling(
// const DistGraph &G,
//// std::vector<IndexType> &L,
// std::vector<uint8_t> &used_bp_roots)
//{
// // Class type of Bit-Parallel label message unit.
// struct MsgUnitBP {
// VertexID v_global;
// uint64_t S_n1;
// uint64_t S_0;
//
// MsgUnitBP() = default;
//// MsgUnitBP(MsgUnitBP&& other) = default;
//// MsgUnitBP(MsgUnitBP& other) = default;
//// MsgUnitBP& operator=(const MsgUnitBP& other) = default;
//// MsgUnitBP& operator=(MsgUnitBP&& other) = default;
// MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
// : v_global(v), S_n1(sn1), S_0(s0) { }
// };
//// VertexID num_v = G.num_v;
//// EdgeID num_e = G.num_e;
// EdgeID local_num_edges = G.num_edges_local;
//
// std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v
// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
// std::vector<VertexID> que(num_masters); // active queue
// VertexID end_que = 0;
// std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que
// VertexID end_tmp_que = 0;
// std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0)
// std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1.
//
//// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
//// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
//// std::vector<VertexID> que(num_v); // active queue
//// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
//// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
//
// VertexID r_global = 0; // root r
// for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// // Select the root r_global
// if (0 == host_id) {
// while (r_global < num_v && used_bp_roots[r_global]) {
// ++r_global;
// }
// if (r_global == num_v) {
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
// }
// continue;
// }
// }
// // Broadcast the r here.
// message_time -= WallTimer::get_time_mark();
// MPI_Bcast(&r_global,
// 1,
// V_ID_Type,
// 0,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// used_bp_roots[r_global] = 1;
//#ifdef DEBUG_MESSAGES_ON
// {//test
// if (0 == host_id) {
// printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt);
// }
// }
//#endif
//
//// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
// fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
// fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
//
// // Mark the r_global
// if (G.get_master_host_id(r_global) == host_id) {
// tmp_d[G.get_local_vertex_id(r_global)] = 0;
// que[end_que++] = r_global;
// }
// // Select the r_global's 64 neighbors
// {
// // Get r_global's neighbors into buffer_send, rank from low to high.
// VertexID local_degree = G.local_out_degrees[r_global];
// std::vector<VertexID> buffer_send(local_degree);
// if (local_degree) {
// EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
// for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
// EdgeID e_i = e_i_start - d_i;
// buffer_send[d_i] = G.out_edges[e_i];
// }
// }
//
// // Get selected neighbors (up to 64)
// std::vector<VertexID> selected_nbrs;
// if (0 != host_id) {
// // Every host other than 0 sends neighbors to host 0
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// 0,
// SENDING_ROOT_NEIGHBORS,
// SENDING_SIZE_ROOT_NEIGHBORS);
// // Receive selected neighbors from host 0
// MPI_Instance::recv_buffer_from_src(selected_nbrs,
// 0,
// SENDING_SELECTED_NEIGHBORS,
// SENDING_SIZE_SELETED_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
// } else {
// // Host 0
// // Host 0 receives neighbors from others
// std::vector<VertexID> all_nbrs(buffer_send);
// std::vector<VertexID > buffer_recv;
// for (int loc = 0; loc < num_hosts - 1; ++loc) {
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::recv_buffer_from_any(buffer_recv,
// SENDING_ROOT_NEIGHBORS,
// SENDING_SIZE_ROOT_NEIGHBORS);
//// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv,
//// num_hosts,
//// SENDING_ROOT_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
// if (buffer_recv.empty()) {
// continue;
// }
//
// buffer_send.resize(buffer_send.size() + buffer_recv.size());
// std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
// all_nbrs.resize(buffer_send.size());
// all_nbrs.assign(buffer_send.begin(), buffer_send.end());
// }
// assert(all_nbrs.size() == G.get_global_out_degree(r_global));
// // Select 64 (or less) neighbors
// VertexID ns = 0; // number of selected neighbor, default 64
// for (VertexID v_global : all_nbrs) {
// if (used_bp_roots[v_global]) {
// continue;
// }
// used_bp_roots[v_global] = 1;
// selected_nbrs.push_back(v_global);
// if (++ns == 64) {
// break;
// }
// }
// // Send selected neighbors to other hosts
// message_time -= WallTimer::get_time_mark();
// for (int dest = 1; dest < num_hosts; ++dest) {
// MPI_Instance::send_buffer_2_dst(selected_nbrs,
// dest,
// SENDING_SELECTED_NEIGHBORS,
// SENDING_SIZE_SELETED_NEIGHBORS);
// }
// message_time += WallTimer::get_time_mark();
// }
//// {//test
//// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size());
//// }
//
// // Synchronize the used_bp_roots.
// for (VertexID v_global : selected_nbrs) {
// used_bp_roots[v_global] = 1;
// }
//
// // Mark selected neighbors
// for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
// VertexID v_global = selected_nbrs[v_i];
// if (host_id != G.get_master_host_id(v_global)) {
// continue;
// }
// tmp_que[end_tmp_que++] = v_global;
// tmp_d[G.get_local_vertex_id(v_global)] = 1;
// tmp_s[v_global].first = 1ULL << v_i;
// }
// }
//
// // Reduce the global number of active vertices
// VertexID global_num_actives = 1;
// UnweightedDist d = 0;
// while (global_num_actives) {
//// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
// VertexID num_sibling_es = 0, num_child_es = 0;
//
//
// // Send active masters to mirrors
// {
// std::vector<MsgUnitBP> buffer_send(end_que);
// for (VertexID que_i = 0; que_i < end_que; ++que_i) {
// VertexID v_global = que[que_i];
// buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
// }
//// {// test
//// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size());
//// }
//
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<MsgUnitBP> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const MsgUnitBP &m : buffer_recv) {
// VertexID v_global = m.v_global;
// if (!G.local_out_degrees[v_global]) {
// continue;
// }
// tmp_s[v_global].first = m.S_n1;
// tmp_s[v_global].second = m.S_0;
// // Push labels
// bit_parallel_push_labels(G,
// v_global,
// tmp_que,
// end_tmp_que,
// sibling_es,
// num_sibling_es,
// child_es,
// num_child_es,
// tmp_d,
// d);
// }
//// {// test
//// printf("host_id: %u root: %u done push.\n", host_id, root);
//// }
// }
// }
//
// // Update the sets in tmp_s
// {
//
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first, w = sibling_es[i].second;
// tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!!
// tmp_s[w].second |= tmp_s[v].first;
//
// }
// // Put into the buffer sending to others
// std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
//// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1);
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first;
// VertexID w = sibling_es[i].second;
//// buffer_send.emplace_back(v, tmp_s[v].second);
//// buffer_send.emplace_back(w, tmp_s[w].second);
// buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
// buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
// }
// // Send the messages
// for (int root = 0; root < num_hosts; ++root) {
// std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const std::pair<VertexID, uint64_t> &m : buffer_recv) {
// tmp_s[m.first].second |= m.second;
// }
// }
// for (VertexID i = 0; i < num_child_es; ++i) {
// VertexID v = child_es[i].first, c = child_es[i].second;
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
// }
// }
////#ifdef DEBUG_MESSAGES_ON
// {// test
// VertexID global_num_sibling_es;
// VertexID global_num_child_es;
// MPI_Allreduce(&num_sibling_es,
// &global_num_sibling_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// MPI_Allreduce(&num_child_es,
// &global_num_child_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// if (0 == host_id) {
// printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es);
// }
// }
////#endif
//
// // Swap que and tmp_que
// tmp_que.swap(que);
// end_que = end_tmp_que;
// end_tmp_que = 0;
// MPI_Allreduce(&end_que,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
//
//// }
// ++d;
// }
//
// for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
// VertexID v_global = G.get_global_vertex_id(v_local);
// L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
// L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
// L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
// }
// }
//}
//// Function bit parallel checking:
//// return false if the shortest distance exists in bp labels, return true if bp labels cannot cover the distance
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking(
// VertexID v_id,
// VertexID w_id,
// const std::vector<IndexType> &L,
// UnweightedDist iter)
//{
// // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already
// const IndexType &Lv = L[v_id];
// const IndexType &Lw = L[w_id];
//
// _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0);
// _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0);
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF.
// if (td - 2 <= iter) {
// td +=
// (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 :
// ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) |
// (Lv.bp_sets[i][1] & Lw.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
//// ++bp_hit_count;
// return false;
// }
// }
// }
// return true;
//}
// Function for initializing at the beginning of a batch
// For a batch, initialize the temporary labels and the real labels of roots;
// traverse roots' labels to initialize the distance buffer;
// unset the flag arrays is_active and got_labels
template <VertexID BATCH_SIZE>
inline VertexID DistBVCPLL<BATCH_SIZE>::
initialization(
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// VertexID b_id,
VertexID roots_start,
VertexID roots_size,
// std::vector<VertexID> &roots_master_local,
const std::vector<uint8_t> &used_bp_roots)
{
// Get the roots_master_local, containing all local roots.
std::vector<VertexID> roots_master_local;
VertexID size_roots_master_local;
VertexID roots_bound = roots_start + roots_size;
try {
for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
roots_master_local.push_back(G.get_local_vertex_id(r_global));
}
}
size_roots_master_local = roots_master_local.size();
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("initialization_roots_master_local: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Short_index
{
if (end_once_candidated_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
VertexID v_local = once_candidated_queue[v_i];
short_index[v_local].indicator_reset();
once_candidated[v_local] = 0;
}
} else {
for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
VertexID v_local = once_candidated_queue[v_i];
short_index[v_local].indicator_reset();
once_candidated[v_local] = 0;
}
}
end_once_candidated_queue = 0;
if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
// short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
}
} else {
for (VertexID r_local : roots_master_local) {
short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
// short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
}
}
}
//
// Real Index
try
{
if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
IndexType &Lr = L[r_local];
// Lr.batches.emplace_back(
// b_id, // Batch ID
// Lr.distances.size(), // start_index
// 1); // size
Lr.distances.emplace_back(
Lr.vertices.size(), // start_index
1, // size
0); // dist
Lr.vertices.push_back(G.get_global_vertex_id(r_local));
// Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
}
} else {
for (VertexID r_local : roots_master_local) {
IndexType &Lr = L[r_local];
// Lr.batches.emplace_back(
// b_id, // Batch ID
// Lr.distances.size(), // start_index
// 1); // size
Lr.distances.emplace_back(
Lr.vertices.size(), // start_index
1, // size
0); // dist
Lr.vertices.push_back(G.get_global_vertex_id(r_local));
// Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
}
}
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("initialization_real_index: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Dist Table
try
{
// struct LabelTableUnit {
// VertexID root_id;
// VertexID label_global_id;
// UnweightedDist dist;
//
// LabelTableUnit() = default;
//
// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
// root_id(r), label_global_id(l), dist(d) {}
// };
std::vector<LabelTableUnit> buffer_send; // buffer for sending
// Dist_matrix
{
// Deprecated Old method: unpack the IndexType structure before sending.
// Okay, it's back.
if (size_roots_master_local >= THRESHOLD_PARALLEL) {
// Offsets for adding labels to buffer_send in parallel
std::vector<VertexID> offsets_beffer_send(size_roots_master_local);
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
offsets_beffer_send[i_r] = L[r_local].vertices.size();
}
EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_beffer_send);
buffer_send.resize(size_labels);
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
VertexID top_location = 0;
IndexType &Lr = L[r_local];
VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
// VertexID b_i_bound = Lr.batches.size();
// _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
_mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
_mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
// Traverse batches array
// for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
// VertexID dist_start_index = Lr.batches[b_i].start_index;
// VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
// Traverse distances array
// for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
VertexID dist_bound_index = Lr.distances.size();
for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
VertexID v_start_index = Lr.distances[dist_i].start_index;
VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
UnweightedDist dist = Lr.distances[dist_i].dist;
// Traverse vertices array
for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// Write into the dist_table
// buffer_send[offsets_beffer_send[i_r] + top_location++] =
// LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist);
buffer_send[offsets_beffer_send[i_r] + top_location++] =
LabelTableUnit(r_root_id, Lr.vertices[v_i], dist);
}
}
// }
}
} else {
for (VertexID r_local : roots_master_local) {
// The distance table.
IndexType &Lr = L[r_local];
VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
// VertexID b_i_bound = Lr.batches.size();
// _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
_mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
_mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
// Traverse batches array
// for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
// VertexID dist_start_index = Lr.batches[b_i].start_index;
// VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
// Traverse distances array
// for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
VertexID dist_bound_index = Lr.distances.size();
for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
VertexID v_start_index = Lr.distances[dist_i].start_index;
VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
UnweightedDist dist = Lr.distances[dist_i].dist;
// Traverse vertices array
for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// Write into the dist_table
buffer_send.emplace_back(r_root_id, Lr.vertices[v_i],
dist); // buffer for sending
// buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
// dist); // buffer for sending
}
}
// }
}
}
}
// Broadcast local roots labels
for (int root = 0; root < num_hosts; ++root) {
std::vector<LabelTableUnit> buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
EdgeID size_buffer_recv = buffer_recv.size();
if (size_buffer_recv >= THRESHOLD_PARALLEL) {
std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
const LabelTableUnit &l = buffer_recv[i_l];
VertexID root_id = l.root_id;
VertexID label_global_id = l.label_global_id;
UnweightedDist dist = l.dist;
dist_table[root_id][label_global_id] = dist;
// Record root_id's number of its received label, for later adding to recved_dist_table
__atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
// recved_dist_table[root_id].push_back(label_global_id);
}
// Record the received label in recved_dist_table, for later reset
#pragma omp parallel for
for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
VertexID &size = sizes_recved_root_labels[root_id];
if (size) {
recved_dist_table[root_id].resize(size);
size = 0;
}
}
#pragma omp parallel for
for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
const LabelTableUnit &l = buffer_recv[i_l];
VertexID root_id = l.root_id;
VertexID label_global_id = l.label_global_id;
PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id);
}
} else {
for (const LabelTableUnit &l : buffer_recv) {
VertexID root_id = l.root_id;
VertexID label_global_id = l.label_global_id;
UnweightedDist dist = l.dist;
dist_table[root_id][label_global_id] = dist;
// Record the received label in recved_dist_table, for later reset
recved_dist_table[root_id].push_back(label_global_id);
}
}
}
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("initialization_dist_table: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Build the Bit-Parallel Labels Table
try
{
// struct MsgBPLabel {
// VertexID r_root_id;
// UnweightedDist bp_dist[BITPARALLEL_SIZE];
// uint64_t bp_sets[BITPARALLEL_SIZE][2];
//
// MsgBPLabel() = default;
// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
// : r_root_id(r)
// {
// memcpy(bp_dist, dist, sizeof(bp_dist));
// memcpy(bp_sets, sets, sizeof(bp_sets));
// }
// };
// std::vector<MPI_Request> requests_send(num_hosts - 1);
std::vector<MsgBPLabel> buffer_send;
std::vector<VertexID> roots_queue;
for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
if (G.get_master_host_id(r_global) != host_id) {
continue;
}
roots_queue.push_back(r_global);
}
VertexID size_roots_queue = roots_queue.size();
if (size_roots_queue >= THRESHOLD_PARALLEL) {
buffer_send.resize(size_roots_queue);
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) {
VertexID r_global = roots_queue[i_r];
VertexID r_local = G.get_local_vertex_id(r_global);
VertexID r_root = r_global - roots_start;
// Prepare for sending
// buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
}
} else {
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) != host_id) {
// continue;
// }
for (VertexID r_global : roots_queue) {
VertexID r_local = G.get_local_vertex_id(r_global);
VertexID r_root = r_global - roots_start;
// Local roots
// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// Prepare for sending
buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
}
}
for (int root = 0; root < num_hosts; ++root) {
std::vector<MsgBPLabel> buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
VertexID size_buffer_recv = buffer_recv.size();
if (size_buffer_recv >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
const MsgBPLabel &m = buffer_recv[i_m];
VertexID r_root = m.r_root_id;
memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
}
} else {
for (const MsgBPLabel &m : buffer_recv) {
VertexID r_root = m.r_root_id;
memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
}
}
}
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("initialization_bp_labels_table: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Active_queue
VertexID global_num_actives = 0; // global number of active vertices.
{
if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
active_queue[i_r] = r_local;
}
end_active_queue = size_roots_master_local;
} else {
for (VertexID r_local : roots_master_local) {
active_queue[end_active_queue++] = r_local;
}
}
// Get the global number of active vertices;
// message_time -= WallTimer::get_time_mark();
MPI_Allreduce(&end_active_queue,
&global_num_actives,
1,
V_ID_Type,
// MPI_SUM,
MPI_MAX,
MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
}
return global_num_actives;
}
// Sequential Version
//// Function for initializing at the begin of a batch
//// For a batch, initialize the temporary labels and real labels of roots;
//// traverse roots' labels to initialize distance buffer;
//// unset flag arrays is_active and got_labels
//template <VertexID BATCH_SIZE>
//inline VertexID DistBVCPLL<BATCH_SIZE>::
//initialization(
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector< std::vector<UnweightedDist> > &dist_table,
// std::vector< std::vector<VertexID> > &recved_dist_table,
// std::vector<BPLabelType> &bp_labels_table,
// std::vector<VertexID> &active_queue,
// VertexID &end_active_queue,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<uint8_t> &once_candidated,
// VertexID b_id,
// VertexID roots_start,
// VertexID roots_size,
//// std::vector<VertexID> &roots_master_local,
// const std::vector<uint8_t> &used_bp_roots)
//{
// // Get the roots_master_local, containing all local roots.
// std::vector<VertexID> roots_master_local;
// VertexID roots_bound = roots_start + roots_size;
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
// roots_master_local.push_back(G.get_local_vertex_id(r_global));
// }
// }
// // Short_index
// {
// for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
// VertexID v_local = once_candidated_queue[v_i];
// short_index[v_local].indicator_reset();
// once_candidated[v_local] = 0;
// }
// end_once_candidated_queue = 0;
// for (VertexID r_local : roots_master_local) {
// short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
// short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
//// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself
//// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels
// }
// }
////
// // Real Index
// {
// for (VertexID r_local : roots_master_local) {
// IndexType &Lr = L[r_local];
// Lr.batches.emplace_back(
// b_id, // Batch ID
// Lr.distances.size(), // start_index
// 1); // size
// Lr.distances.emplace_back(
// Lr.vertices.size(), // start_index
// 1, // size
// 0); // dist
// Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
// }
// }
//
// // Dist Table
// {
//// struct LabelTableUnit {
//// VertexID root_id;
//// VertexID label_global_id;
//// UnweightedDist dist;
////
//// LabelTableUnit() = default;
////
//// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//// root_id(r), label_global_id(l), dist(d) {}
//// };
// std::vector<LabelTableUnit> buffer_send; // buffer for sending
// // Dist_matrix
// {
// // Deprecated Old method: unpack the IndexType structure before sending.
// for (VertexID r_local : roots_master_local) {
// // The distance table.
// IndexType &Lr = L[r_local];
// VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
// VertexID b_i_bound = Lr.batches.size();
// _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
// // Traverse batches array
// for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
// VertexID dist_start_index = Lr.batches[b_i].start_index;
// VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
// // Traverse distances array
// for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
// VertexID v_start_index = Lr.distances[dist_i].start_index;
// VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
// UnweightedDist dist = Lr.distances[dist_i].dist;
// // Traverse vertices array
// for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// // Write into the dist_table
//// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table
// buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
// dist); // buffer for sending
// }
// }
// }
// }
// }
// // Broadcast local roots labels
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<LabelTableUnit> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const LabelTableUnit &l : buffer_recv) {
// VertexID root_id = l.root_id;
// VertexID label_global_id = l.label_global_id;
// UnweightedDist dist = l.dist;
// dist_table[root_id][label_global_id] = dist;
// // Record the received label in recved_dist_table, for later reset
// recved_dist_table[root_id].push_back(label_global_id);
// }
// }
// }
//
// // Build the Bit-Parallel Labels Table
// {
//// struct MsgBPLabel {
//// VertexID r_root_id;
//// UnweightedDist bp_dist[BITPARALLEL_SIZE];
//// uint64_t bp_sets[BITPARALLEL_SIZE][2];
////
//// MsgBPLabel() = default;
//// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
//// : r_root_id(r)
//// {
//// memcpy(bp_dist, dist, sizeof(bp_dist));
//// memcpy(bp_sets, sets, sizeof(bp_sets));
//// }
//// };
//// std::vector<MPI_Request> requests_send(num_hosts - 1);
// std::vector<MsgBPLabel> buffer_send;
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) != host_id) {
// continue;
// }
// VertexID r_local = G.get_local_vertex_id(r_global);
// VertexID r_root = r_global - roots_start;
// // Local roots
//// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// // Prepare for sending
// buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
// }
//
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<MsgBPLabel> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const MsgBPLabel &m : buffer_recv) {
// VertexID r_root = m.r_root_id;
// memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
// memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// }
// }
// }
//
// // TODO: parallel enqueue
// // Active_queue
// VertexID global_num_actives = 0; // global number of active vertices.
// {
// for (VertexID r_local : roots_master_local) {
// active_queue[end_active_queue++] = r_local;
// }
// // Get the global number of active vertices;
// message_time -= WallTimer::get_time_mark();
// MPI_Allreduce(&end_active_queue,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// }
//
// return global_num_actives;
//}
//// Function: push v_head_global's newly added labels to its all neighbors.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//push_single_label(
// VertexID v_head_global,
// VertexID label_root_id,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter)
//{
// const BPLabelType &L_label = bp_labels_table[label_root_id];
// VertexID label_global_id = label_root_id + roots_start;
// EdgeID e_i_start = G.vertices_idx[v_head_global];
// EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
// for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
// VertexID v_tail_global = G.out_edges[e_i];
// if (used_bp_roots[v_tail_global]) {
// continue;
// }
// if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it.
// return;
// }
//
// VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
// const IndexType &L_tail = L[v_tail_local];
// if (v_tail_global <= label_global_id) {
// // remaining v_tail_global has higher rank than the label
// return;
// }
// ShortIndex &SI_v_tail = short_index[v_tail_local];
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator.set(label_root_id);
// // Add into once_candidated_queue
//
// if (!once_candidated[v_tail_local]) {
// // If v_tail_global is not in the once_candidated_queue yet, add it in
// once_candidated[v_tail_local] = true;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
// }
// // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// // ++total_check_count;
//// const IndexType &L_label = L[label_global_id];
//// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
//// bp_checking_ins_count.measure_start();
// bool no_need_add = false;
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
// if (td - 2 <= iter) {
// td +=
// (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
// ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
// (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
// no_need_add = true;
//// ++bp_hit_count;
// break;
// }
// }
// }
// if (no_need_add) {
//// bp_checking_ins_count.measure_stop();
// continue;
// }
//// bp_checking_ins_count.measure_stop();
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = true;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = true;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
// }
//// {// Just for the complain from the compiler
//// assert(iter >= iter);
//// }
//}
// Function: one global round of label pushing (parallel version).
// Every host flattens the newly inserted labels of a slice of its active
// master vertices into flat send buffers (an index buffer of
// (vertex, label-count) pairs plus a label buffer), then each host in turn
// broadcasts its buffers to all hosts; receivers push the arrived labels to
// their local out-neighbors and gather the resulting candidates into
// got_candidates_queue and once_candidated_queue.
template<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_pushing_para(
        const DistGraph &G,
        const VertexID roots_start,                  // global ID of the first root in this batch
        const std::vector<uint8_t> &used_bp_roots,   // flags: vertex is a bit-parallel root
        const std::vector<VertexID> &active_queue,   // local IDs of vertices activated last round
        const VertexID global_start,                 // start offset of the slice to process
        const VertexID global_size,                  // requested slice size (capped by local_size)
        const VertexID local_size,                   // number of valid local entries in active_queue
//        const VertexID start_active_queue,
//        const VertexID size_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        const std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
        std::vector<uint8_t> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const UnweightedDist iter)                   // current BFS level (distance of new labels)
{
    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices;
        //.first: Vertex ID
        //.second: size of labels
    std::vector<VertexID> buffer_send_labels;
    if (local_size) {
        // Clamp the requested slice to what this host actually has.
        const VertexID start_active_queue = global_start;
        const VertexID size_active_queue = global_size <= local_size ?
                                            global_size :
                                            local_size;
        const VertexID bound_active_queue = start_active_queue + size_active_queue;
        buffer_send_indices.resize(size_active_queue);
        // Prepare offset for inserting: first pass records each vertex's
        // newly-added label count so a prefix sum yields write offsets.
        std::vector<VertexID> offsets_buffer_locs(size_active_queue);
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            const IndexType &Lv = L[v_head_local];
            // NOTE(review): assumes Lv.distances is non-empty for every active
            // vertex (rbegin() on an empty vector is UB) — presumably every
            // active vertex inserted labels this round; confirm with callers.
            offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
        }
        // prefix_sum_for_offsets turns sizes into exclusive prefix offsets
        // in place and returns the total number of labels to send.
        EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
        try {
            buffer_send_labels.resize(size_buffer_send_labels);
        }
        catch (const std::bad_alloc &) {
            // Out of memory: report sizes and memory state, then abort.
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            // NOTE(review): %lu assumes EdgeID is unsigned long — confirm on
            // the target platform.
            printf("schedule_label_pushing_para.buffer_send_labels: bad_alloc "
                   "host_id: %d "
                   "size_buffer_send_labels: %lu "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                    host_id,
                    size_buffer_send_labels,
                    get_index_size() * 1.0 / (1 << 30),
                    memtotal / 1024,
                    memfree / 1024);
            exit(1);
        }
        // Build buffer_send_labels by parallel inserting: each thread writes
        // its vertex's labels at the offset computed above — no contention.
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active (already cleared in the first pass; harmless)
            VertexID v_head_global = G.get_global_vertex_id(v_head_local);
            const IndexType &Lv = L[v_head_local];
            // Prepare the buffer_send_indices
            VertexID tmp_i_q = i_q - start_active_queue;
            buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
            // These 2 index are used for traversing v_head's last inserted labels
            VertexID l_i_start = Lv.distances.rbegin()->start_index;
            VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
            VertexID top_labels = offsets_buffer_locs[tmp_i_q];
            for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
                // Labels are sent as batch-local root IDs (global - roots_start).
                VertexID label_root_id = Lv.vertices[l_i] - roots_start;
                buffer_send_labels[top_labels++] = label_root_id;
//                buffer_send_labels.push_back(label_root_id);
            }
        }
    }
    ////////////////////////////////////////////////
    ////
//    const VertexID bound_active_queue = start_active_queue + size_active_queue;
//    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(size_active_queue);
//        //.first: Vertex ID
//        //.second: size of labels
//    std::vector<VertexID> buffer_send_labels;
//    // Prepare masters' newly added labels for sending
//    // Parallel Version
//    // Prepare offset for inserting
//    std::vector<VertexID> offsets_buffer_locs(size_active_queue);
//#pragma omp parallel for
//    for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
//        VertexID v_head_local = active_queue[i_q];
//        is_active[v_head_local] = 0; // reset is_active
//        const IndexType &Lv = L[v_head_local];
//        offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
//    }
//    EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
////    {// test
////        if (0 == host_id) {
////            double memtotal = 0;
////            double memfree = 0;
////            double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
////            PADO::Utils::system_memory(memtotal, memfree);
////            printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
////                    bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
////        }
////    }
//    buffer_send_labels.resize(size_buffer_send_labels);
////    {// test
////        if (0 == host_id) {
////            printf("buffer_send_labels created.\n");
////        }
////    }
//
//    // Build buffer_send_labels by parallel inserting
//#pragma omp parallel for
//    for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
//        VertexID tmp_i_q = i_q - start_active_queue;
//        VertexID v_head_local = active_queue[i_q];
//        is_active[v_head_local] = 0; // reset is_active
//        VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//        const IndexType &Lv = L[v_head_local];
//        // Prepare the buffer_send_indices
//        buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
//        // These 2 index are used for traversing v_head's last inserted labels
//        VertexID l_i_start = Lv.distances.rbegin()->start_index;
//        VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//        VertexID top_labels = offsets_buffer_locs[tmp_i_q];
//        for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//            VertexID label_root_id = Lv.vertices[l_i];
//            buffer_send_labels[top_labels++] = label_root_id;
////            buffer_send_labels.push_back(label_root_id);
//        }
//    }
////    end_active_queue = 0;
////
    ////////////////////////////////////////////////
    // All-to-all exchange: every host takes a turn as broadcast root.
    for (int root = 0; root < num_hosts; ++root) {
        // Get the indices
        std::vector<std::pair<VertexID, VertexID> > indices_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_indices,
                indices_buffer);
        if (indices_buffer.empty()) {
            continue;
        }
        // Get the labels
        std::vector<VertexID> labels_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_labels,
                labels_buffer);
        VertexID size_indices_buffer = indices_buffer.size();
        // Prepare the offsets for reading indices_buffer: prefix sum over the
        // per-vertex label counts gives each sender's start in labels_buffer.
        std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            starts_locs_index[i_i] = e.second;
        }
        EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
        // Prepare the offsets for inserting v_tails into queue: each head
        // vertex may enqueue at most local_out_degrees[] neighbors.
        std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
        }
        EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
        // Per-sender scratch queues: each head writes to its own disjoint
        // region [offsets_tmp_queue[i], +sizes[i]) so the parallel loop below
        // needs no locking across heads.
        std::vector<VertexID> tmp_got_candidates_queue;
        std::vector<VertexID> sizes_tmp_got_candidates_queue;
        std::vector<VertexID> tmp_once_candidated_queue;
        std::vector<VertexID> sizes_tmp_once_candidated_queue;
        try {
            tmp_got_candidates_queue.resize(num_ngbrs);
            sizes_tmp_got_candidates_queue.resize(size_indices_buffer, 0);
            tmp_once_candidated_queue.resize(num_ngbrs);
            sizes_tmp_once_candidated_queue.resize(size_indices_buffer, 0);
        }
        catch (const std::bad_alloc &) {
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("schedule_label_pushing_para.tmp_queues: bad_alloc "
                   "host_id: %d "
                   "num_ngbrs: %lu "
                   "size_indices_buffer: %u "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                    host_id,
                    num_ngbrs,
                    size_indices_buffer,
                    get_index_size() * 1.0 / (1 << 30),
                    memtotal / 1024,
                    memfree / 1024);
            exit(1);
        }
        // Push each received head's label range to its local out-neighbors.
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            VertexID v_head_global = indices_buffer[i_i].first;
            EdgeID start_index = starts_locs_index[i_i];
            // The last head's range ends at the total; others end where the
            // next head's range begins.
            EdgeID bound_index = i_i != size_indices_buffer - 1 ?
                                 starts_locs_index[i_i + 1] : total_recved_labels;
            if (G.local_out_degrees[v_head_global]) {
                local_push_labels_para(
                        v_head_global,
                        start_index,
                        bound_index,
                        roots_start,
                        labels_buffer,
                        G,
                        short_index,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
                        tmp_got_candidates_queue,
                        sizes_tmp_got_candidates_queue[i_i],
                        offsets_tmp_queue[i_i],
                        got_candidates,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
                        tmp_once_candidated_queue,
                        sizes_tmp_once_candidated_queue[i_i],
                        once_candidated,
                        bp_labels_table,
                        used_bp_roots,
                        iter);
            }
        }
        {// Collect elements from tmp_got_candidates_queue to got_candidates_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
            PADO::collect_into_queue(
                    tmp_got_candidates_queue,
                    offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue
                    sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue
                    total_new,
                    got_candidates_queue,
                    end_got_candidates_queue);
        }
        {// Collect elements from tmp_once_candidated_queue to once_candidated_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
            PADO::collect_into_queue(
                    tmp_once_candidated_queue,
                    offsets_tmp_queue, // the locations for reading tmp_once_candidated_queue
                    sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
                    total_new,
                    once_candidated_queue,
                    end_once_candidated_queue);
        }
    }
}
// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Thread-safe variant: called concurrently for different head vertices, so
// several threads may touch the same tail vertex at once. All shared flags
// (indicator, once_candidated, is_candidate, got_candidates) are claimed via
// CAS; per-head scratch slots (tmp_*_queue at offset_tmp_queue) are written
// without synchronization because each head owns a disjoint region.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_para(
        const VertexID v_head_global,                 // head whose labels are pushed
        const EdgeID start_index,                     // start of head's labels in labels_buffer
        const EdgeID bound_index,                     // one past the end of that range
        const VertexID roots_start,                   // global ID of this batch's first root
        const std::vector<VertexID> &labels_buffer,   // batch-local root IDs of new labels
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
        std::vector<VertexID> &tmp_got_candidates_queue,
        VertexID &size_tmp_got_candidates_queue,      // this head's fill count (exclusive to caller's slot)
        const VertexID offset_tmp_queue,              // this head's base offset in the tmp queues
        std::vector<uint8_t> &got_candidates,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
        std::vector<VertexID> &tmp_once_candidated_queue,
        VertexID &size_tmp_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)                    // current BFS level
{
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        // NOTE(review): the early return presumes out-edges are sorted by
        // ascending global ID, so all remaining tails also rank higher than
        // every root — confirm DistGraph keeps out_edges sorted.
        if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
            return;
        }
        VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
        const IndexType &L_tail = L[v_tail_local];
        ShortIndex &SI_v_tail = short_index[v_tail_local];
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
//            if (SI_v_tail.indicator[label_root_id]) {
//                // The label is already selected before
//                continue;
//            }
//            // Record label_root_id as once selected by v_tail_global
//            SI_v_tail.indicator[label_root_id] = 1;
            {// Deal with race condition: CAS both tests and claims the
             // indicator atomically, so exactly one thread proceeds per
             // (tail, label) pair.
                if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id, static_cast<uint8_t>(0),
                               static_cast<uint8_t>(1))) {
                    // The label is already selected before
                    continue;
                }
            }
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in.
                // Check-then-CAS: the plain read filters cheaply; the CAS
                // guarantees only one thread enqueues the tail.
                if (PADO::CAS(once_candidated.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) {
                    tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local;
                }
//                once_candidated[v_tail_local] = 1;
//                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
//            const IndexType &L_label = L[label_global_id];
//            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                // NOTE(review): td is VertexID; if VertexID is unsigned and
                // td < 2, td - 2 wraps to a huge value and the branch is
                // skipped — presumably intended (matches upstream PLL code),
                // but confirm bp_dist sentinels make this safe.
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                if (td - 2 <= iter) {
                    // Shared BP neighbor sets can shorten the path by 1 or 2.
                    td +=
                            (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                            ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                             (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
                            ? -1 : 0;
                    if (td <= iter) {
                        no_need_add = true;
                        break;
                    }
                }
            }
            if (no_need_add) {
                continue;
            }
//            if (SI_v_tail.is_candidate[label_root_id]) {
//                continue;
//            }
//            SI_v_tail.is_candidate[label_root_id] = 1;
//            SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
            // Claim candidacy via CAS; TS_enqueue appends with an atomic
            // fetch-add on end_candidates_que (tails are shared across heads).
            if (!SI_v_tail.is_candidate[label_root_id]) {
                if (CAS(SI_v_tail.is_candidate.data() + label_root_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) {
                    PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id);
                }
            }
            // Add into got_candidates queue
//            if (!got_candidates[v_tail_local]) {
//                // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//                got_candidates[v_tail_local] = 1;
//                got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//            }
            if (!got_candidates[v_tail_local]) {
                if (CAS(got_candidates.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) {
                    tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local;
                }
            }
        }
    }
//    {
//        assert(iter >= iter);
//    }
}
// Function: sequentially scatter one head vertex's freshly received labels
// (labels_buffer[start_index, bound_index)) to all of the head's local
// out-neighbors, recording each surviving label as a candidate and
// enqueueing newly touched tails. Single-threaded counterpart of
// local_push_labels_para.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_seq(
        VertexID v_head_global,
        EdgeID start_index,
        EdgeID bound_index,
        VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<uint8_t> &got_candidates,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Bit-parallel pruning test: true when some shared bit-parallel root
    // already certifies a path of length <= iter between the label root
    // and the tail, so the label would be redundant.
    auto bp_prunes = [iter](const BPLabelType &root_bp, const IndexType &tail_bp) -> bool {
        for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
            VertexID td = root_bp.bp_dist[i] + tail_bp.bp_dist[i];
            if (td - 2 <= iter) {
                // Shared S^{-1}/S^{0} neighbor sets shorten the path by 2 or 1.
                td +=
                        (root_bp.bp_sets[i][0] & tail_bp.bp_sets[i][0]) ? -2 :
                        ((root_bp.bp_sets[i][0] & tail_bp.bp_sets[i][1]) |
                         (root_bp.bp_sets[i][1] & tail_bp.bp_sets[i][0]))
                        ? -1 : 0;
                if (td <= iter) {
                    return true;
                }
            }
        }
        return false;
    };
    // Visit every local out-neighbor (tail) of the head vertex.
    EdgeID edge_first = G.vertices_idx[v_head_global];
    EdgeID edge_last = edge_first + G.local_out_degrees[v_head_global];
    for (EdgeID e_idx = edge_first; e_idx < edge_last; ++e_idx) {
        VertexID tail_global = G.out_edges[e_idx];
        if (used_bp_roots[tail_global]) {
            // Tail is itself a bit-parallel root; it takes no normal labels.
            continue;
        }
        if (tail_global < roots_start) {
            // Tail (and all remaining neighbors) outranks every root of this
            // batch, so no label can be pushed from here on.
            return;
        }
        // Offer each label in the buffered range to this tail.
        for (VertexID buf_i = start_index; buf_i < bound_index; ++buf_i) {
            VertexID root_id = labels_buffer[buf_i];
            VertexID label_global = root_id + roots_start;
            if (tail_global <= label_global) {
                // Tail outranks this particular label root.
                continue;
            }
            VertexID tail_local = G.get_local_vertex_id(tail_global);
            const IndexType &tail_index = L[tail_local];
            ShortIndex &si_tail = short_index[tail_local];
            if (si_tail.indicator[root_id]) {
                // This tail has already seen this label root before.
                continue;
            }
            // Mark the root as once selected by this tail.
            si_tail.indicator[root_id] = 1;
            if (!once_candidated[tail_local]) {
                // First touch this batch: remember the tail for later reset.
                once_candidated[tail_local] = 1;
                once_candidated_queue[end_once_candidated_queue++] = tail_local;
            }
            // Drop the label if bit-parallel roots already cover the distance.
            if (bp_prunes(bp_labels_table[root_id], tail_index)) {
                continue;
            }
            if (si_tail.is_candidate[root_id]) {
                // Already queued as a candidate for this tail.
                continue;
            }
            si_tail.is_candidate[root_id] = 1;
            si_tail.candidates_que[si_tail.end_candidates_que++] = root_id;
            if (!got_candidates[tail_local]) {
                // Enqueue the tail once for candidate processing (no duplicates).
                got_candidates[tail_local] = 1;
                got_candidates_queue[end_got_candidates_queue++] = tail_local;
            }
        }
    }
}
//// Function: pushes v_head's labels to v_head's every (master) neighbor
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//local_push_labels(
// VertexID v_head_local,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter)
//{
// // The data structure of a message
//// std::vector< LabelUnitType > buffer_recv;
// const IndexType &Lv = L[v_head_local];
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin() -> start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size;
// // Traverse v_head's every neighbor v_tail
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// EdgeID e_i_start = G.vertices_idx[v_head_global];
// EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
// for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
// VertexID v_tail_global = G.out_edges[e_i];
// if (used_bp_roots[v_tail_global]) {
// continue;
// }
// if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
// return;
// }
//
// // Traverse v_head's last inserted labels
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// VertexID label_global_id = label_root_id + roots_start;
// if (v_tail_global <= label_global_id) {
// // v_tail_global has higher rank than the label
// continue;
// }
// VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
// const IndexType &L_tail = L[v_tail_local];
// ShortIndex &SI_v_tail = short_index[v_tail_local];
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator.set(label_root_id);
// // Add into once_candidated_queue
//
// if (!once_candidated[v_tail_local]) {
// // If v_tail_global is not in the once_candidated_queue yet, add it in
// once_candidated[v_tail_local] = true;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
// }
//
// // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// // ++total_check_count;
//// const IndexType &L_label = L[label_global_id];
//// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
//// bp_checking_ins_count.measure_start();
// const BPLabelType &L_label = bp_labels_table[label_root_id];
// bool no_need_add = false;
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
// if (td - 2 <= iter) {
// td +=
// (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
// ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
// (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
// no_need_add = true;
//// ++bp_hit_count;
// break;
// }
// }
// }
// if (no_need_add) {
//// bp_checking_ins_count.measure_stop();
// continue;
// }
//// bp_checking_ins_count.measure_stop();
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = true;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = true;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
// }
// }
//
// {
// assert(iter >= iter);
// }
//}
//// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts
//// Has some mysterious problem: when I call this function, some hosts will receive wrong messages; when I copy all
//// code of this function into the caller, all messages become right.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//sync_masters_2_mirrors(
// const DistGraph &G,
// const std::vector<VertexID> &active_queue,
// VertexID end_active_queue,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send,
// std::vector<MPI_Request> &requests_send
//)
//{
//// std::vector< std::pair<VertexID, VertexID> > buffer_send;
//    // pair.first: Owner vertex ID of the label
//    // pair.second: label vertex ID of the label
// // Prepare masters' newly added labels for sending
// for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send.emplace_back(v_head_global, label_root_id);
//// {//test
//// if (1 == host_id) {
//// printf("@%u host_id: %u v_head_global: %u\n", __LINE__, host_id, v_head_global);//
//// }
//// }
// }
// }
// {
// if (!buffer_send.empty()) {
// printf("@%u host_id: %u sync_masters_2_mirrors: buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
// }
// assert(!requests_send.empty());
// }
//
// // Send messages
// for (int loc = 0; loc < num_hosts - 1; ++loc) {
// int dest_host_id = G.buffer_send_list_loc_2_master_host_id(loc);
// MPI_Isend(buffer_send.data(),
// MPI_Instance::get_sending_size(buffer_send),
// MPI_CHAR,
// dest_host_id,
// SENDING_MASTERS_TO_MIRRORS,
// MPI_COMM_WORLD,
// &requests_send[loc]);
// {
// if (!buffer_send.empty()) {
// printf("@%u host_id: %u dest_host_id: %u buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, dest_host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
// }
// }
// }
//}
// Function for distance query;
// traverse vertex v_id's labels;
// return false if a shorter-or-equal distance exists already, return true if
// the candidate cand_root_id can be added into v_id's label.
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        UnweightedDist iter)
{
    VertexID cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id_local];
    // Prefetch label arrays. Use .data() with an explicit cast: &v[0] is
    // undefined behavior when the vector is empty, and _mm_prefetch expects a
    // const char * (matching the cast style already used in this file).
    _mm_prefetch(reinterpret_cast<const char *>(Lv.distances.data()), _MM_HINT_T0);
    _mm_prefetch(reinterpret_cast<const char *>(Lv.vertices.data()), _MM_HINT_T0);
    // Hoisted out of the loop below: the prefetch target does not depend on dist_i.
    _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0);
    // Traverse v_id's all existing labels, one distance element at a time.
    VertexID dist_bound_index = Lv.distances.size();
    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
        UnweightedDist dist = Lv.distances[dist_i].dist;
        // Cannot break early on dist >= iter here: without batch ids the
        // distance elements are no longer in increasing order across batches.
        VertexID v_start_index = Lv.distances[dist_i].start_index;
        VertexID v_bound_index = v_start_index + Lv.distances[dist_i].size;
        for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
            VertexID v = Lv.vertices[v_i]; // v is a label hub of v_id
            if (v >= cand_real_id) {
                // Vertex cand_real_id cannot have labels whose ranks are lower than it,
                // in which case dist_table[cand_root_id][v] does not exist.
                continue;
            }
            VertexID d_tmp = dist + dist_table[cand_root_id][v];
            if (d_tmp <= iter) {
                // An existing hub path is at least as short: reject the candidate.
                return false;
            }
        }
    }
    return true;
}
//// Sequential version
// Inserts candidate cand_root_id into vertex v_id's labels (vertices array
// only; the index arrays are updated later by update_label_indices()).
// If v_id is itself a root of this batch, the update is also queued into
// buffer_send so it can be broadcast to other hosts' dist_table.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_seq(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
{
    try {
        const VertexID label_global_id = cand_root_id + roots_start;
        L[v_id_local].vertices.push_back(label_global_id);
        // Is v_id a root of this batch? If so, other hosts need this update.
        const VertexID v_global = G.get_global_vertex_id(v_id_local);
        const VertexID as_root = v_global - roots_start;
        if (v_global >= roots_start && as_root < roots_size) {
            buffer_send.emplace_back(as_root, label_global_id);
        }
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report index size and system memory, then abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("insert_label_only_seq: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}
//// Parallel Version
// Inserts candidate cand_root_id into vertex v_id's labels (vertices array
// only; index arrays are updated later by update_label_indices()).
// Unlike the sequential version, a root's update is written into this
// thread's private slice of tmp_buffer_send (starting at
// offset_tmp_buffer_send, current length size_tmp_buffer_send), so no
// synchronization is needed between OpenMP threads.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_para(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
        std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
        EdgeID &size_tmp_buffer_send,
        const EdgeID offset_tmp_buffer_send)
{
    try {
        const VertexID label_global_id = cand_root_id + roots_start;
        L[v_id_local].vertices.push_back(label_global_id);
        // Is v_id a root of this batch? If so, other hosts need this update.
        const VertexID v_global = G.get_global_vertex_id(v_id_local);
        const VertexID as_root = v_global - roots_start;
        if (v_global >= roots_start && as_root < roots_size) {
            const EdgeID slot = offset_tmp_buffer_send + size_tmp_buffer_send;
            ++size_tmp_buffer_send;
            tmp_buffer_send[slot] = std::make_pair(as_root, label_global_id);
        }
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report index size and system memory, then abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("insert_label_only_para: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}
// Updates v_id's label index arrays after inserted_count new labels were
// appended to L[v_id].vertices during this iteration: records one new
// distance element covering the freshly appended tail of the vertices array.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
update_label_indices(
        const VertexID v_id_local,
        const VertexID inserted_count,
        const UnweightedDist iter)
{
    try {
        IndexType &Lv = L[v_id_local];
        // The new labels occupy the last inserted_count slots of Lv.vertices.
        const VertexID tail_start = Lv.vertices.size() - inserted_count;
        Lv.distances.emplace_back(
                tail_start,      // start index
                inserted_count,  // size
                iter);           // distance
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report index size and system memory, then abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("update_label_indices: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}
// Resets the per-batch shared tables back to their pristine state so the next
// batch starts cheap: only the dist_table entries that were actually touched
// (recorded in recved_dist_table) are restored to INF, instead of wiping the
// whole table. Also zeroes the bit-parallel label table and drops the labels
// of local-minimum vertices.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
reset_at_end(
        const DistGraph &G,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        const std::vector<VertexID> &once_candidated_queue,
        const VertexID end_once_candidated_queue)
{
    for (VertexID root = 0; root < BATCH_SIZE; ++root) {
        // Restore exactly the entries received from other hosts this batch.
        std::vector<VertexID> &touched = recved_dist_table[root];
        for (VertexID real_id : touched) {
            dist_table[root][real_id] = MAX_UNWEIGHTED_DIST;
        }
        touched.clear();
        // Zero this root's bit-parallel label entry as well.
        memset(bp_labels_table[root].bp_dist, 0, sizeof(bp_labels_table[root].bp_dist));
        memset(bp_labels_table[root].bp_sets, 0, sizeof(bp_labels_table[root].bp_sets));
    }
    // Vertices in the local minimum set do not keep labels across batches.
    for (VertexID q_i = 0; q_i < end_once_candidated_queue; ++q_i) {
        VertexID v_local = once_candidated_queue[q_i];
        if (G.is_local_minimum[v_local]) {
            L[v_local].clean_all_indices();
        }
    }
}
// Process one batch of roots [roots_start, roots_start + roots_size):
// 1) initialization() seeds labels, dist_table and bp_labels_table for this batch;
// 2) loop BFS-like rounds (iter == current distance) until no host has active
//    vertices: scatter pushes active vertices' newest labels to neighbors as
//    candidates, gather distance-checks candidates, inserts survivors, and
//    broadcasts roots' new labels so every host's dist_table stays consistent;
// 3) reset_at_end() restores the shared per-batch tables.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
        const DistGraph &G,
//        const VertexID b_id,
        const VertexID roots_start, // start id of roots
        const VertexID roots_size, // how many roots in the batch
        const std::vector<uint8_t> &used_bp_roots,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
//        std::vector<bool> &got_candidates,
        std::vector<uint8_t> &is_active,
//        std::vector<bool> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated)
//        std::vector<bool> &once_candidated)
{
    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
//    initializing_time -= WallTimer::get_time_mark();
    // The Maximum of active vertices among hosts.
    VertexID global_num_actives = initialization(G,
                                                 short_index,
                                                 dist_table,
                                                 recved_dist_table,
                                                 bp_labels_table,
                                                 active_queue,
                                                 end_active_queue,
                                                 once_candidated_queue,
                                                 end_once_candidated_queue,
                                                 once_candidated,
//                                                 b_id,
                                                 roots_start,
                                                 roots_size,
//                                                 roots_master_local,
                                                 used_bp_roots);
//    initializing_time += WallTimer::get_time_mark();
    UnweightedDist iter = 0; // The iterator, also the distance for current iteration
//    {//test
//        if (0 == host_id) {
//            printf("host_id: %u initialization finished.\n", host_id);
//        }
//    }
    while (global_num_actives) {
        ++iter;
//#ifdef DEBUG_MESSAGES_ON
//        {//test
////            if (0 == host_id) {
//            double memtotal = 0;
//            double memfree = 0;
//            PADO::Utils::system_memory(memtotal, memfree);
//            printf("iter: %u "
//                   "host_id: %d "
//                   "global_num_actives: %u "
//                   "L.size(): %.2fGB "
//                   "memtotal: %.2fGB "
//                   "memfree: %.2fGB\n",
//                   iter,
//                   host_id,
//                   global_num_actives,
//                   get_index_size() * 1.0 / (1 << 30),
//                   memtotal / 1024,
//                   memfree / 1024);
////            }
//        }
//#endif
        // Traverse active vertices to push their labels as candidates
        // Send masters' newly added labels to other hosts
        try
        {
//            scatter_time -= WallTimer::get_time_mark();
            // Divide the pushing into many-time runs.
            // NOTE(review): the chunk count is derived from global_num_actives
            // (the max over hosts), so every host executes the same number of
            // schedule_label_pushing_para() calls per iteration — presumably a
            // collective operation; hosts that run out of local actives pass
            // local_size == 0 but still participate. Confirm against
            // schedule_label_pushing_para's implementation.
            const VertexID chunk_size = 1 << 13;
            VertexID remainder = global_num_actives % chunk_size;
            VertexID bound_global_i = global_num_actives - remainder;
//            VertexID remainder = end_active_queue % chunk_size;
//            VertexID bound_active_queue = end_active_queue - remainder;
            VertexID local_size;
            for (VertexID global_i = 0; global_i < bound_global_i; global_i += chunk_size) {
                // This host's share of the current chunk (0 if exhausted).
                if (global_i < end_active_queue) {
                    local_size = end_active_queue - global_i;
                } else {
                    local_size = 0;
                }
                schedule_label_pushing_para(
                        G,
                        roots_start,
                        used_bp_roots,
                        active_queue,
                        global_i,
                        chunk_size,
                        local_size,
                        got_candidates_queue,
                        end_got_candidates_queue,
                        short_index,
                        bp_labels_table,
                        got_candidates,
                        is_active,
                        once_candidated_queue,
                        end_once_candidated_queue,
                        once_candidated,
                        iter);
            }
            if (remainder) {
                // One last partial chunk for the leftover actives.
                if (bound_global_i < end_active_queue) {
                    local_size = end_active_queue - bound_global_i;
                } else {
                    local_size = 0;
                }
                schedule_label_pushing_para(
                        G,
                        roots_start,
                        used_bp_roots,
                        active_queue,
                        bound_global_i,
                        remainder,
                        local_size,
                        got_candidates_queue,
                        end_got_candidates_queue,
                        short_index,
                        bp_labels_table,
                        got_candidates,
                        is_active,
                        once_candidated_queue,
                        end_once_candidated_queue,
                        once_candidated,
                        iter);
            }
//
//            schedule_label_pushing_para(
//                    G,
//                    roots_start,
//                    used_bp_roots,
//                    active_queue,
//                    0,
//                    end_active_queue,
//                    got_candidates_queue,
//                    end_got_candidates_queue,
//                    short_index,
//                    bp_labels_table,
//                    got_candidates,
//                    is_active,
//                    once_candidated_queue,
//                    end_once_candidated_queue,
//                    once_candidated,
//                    iter);
            end_active_queue = 0;
//            scatter_time += WallTimer::get_time_mark();
        }
        catch (const std::bad_alloc &) {
            // Out of memory during the scatter phase: report and abort.
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("pushing: bad_alloc "
                   "iter: %u "
                   "host_id: %d "
                   "global_num_actives: %u "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                   iter,
                   host_id,
                   global_num_actives,
                   get_index_size() * 1.0 / (1 << 30),
                   memtotal / 1024,
                   memfree / 1024);
            exit(1);
        }
        // Traverse vertices in the got_candidates_queue to insert labels
        {
//            gather_time -= WallTimer::get_time_mark();
            std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
                // pair.first: root id
                // pair.second: label (global) id of the root
//            if (true) {
            if (end_got_candidates_queue >= THRESHOLD_PARALLEL) {
                // Parallel path: every queue slot i_queue gets its own
                // thread-private slice of tmp_active_queue / tmp_buffer_send,
                // later compacted by prefix sums + collect_into_queue().
                // Prepare for parallel active_queue
                // Don't need offsets_tmp_active_queue here, because the index i_queue is the offset already.
                // Actually we still need offsets_tmp_active_queue, because collect_into_queue() needs it.
                std::vector<VertexID> offsets_tmp_active_queue;
                std::vector<VertexID> tmp_active_queue;
                std::vector<VertexID> sizes_tmp_active_queue;
                std::vector<EdgeID> offsets_tmp_buffer_send;
                std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send;
                std::vector<EdgeID> sizes_tmp_buffer_send;
                EdgeID total_send_labels;
                try {
                    offsets_tmp_active_queue.resize(end_got_candidates_queue);
#pragma omp parallel for
                    for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
                        offsets_tmp_active_queue[i_q] = i_q;
                    }
                    tmp_active_queue.resize(end_got_candidates_queue);
                    sizes_tmp_active_queue.resize(end_got_candidates_queue,
                                                  0); // Size will only be 0 or 1, but it will become offsets eventually.
                    // Prepare for parallel buffer_send
//                    std::vector<EdgeID> offsets_tmp_buffer_send(end_got_candidates_queue);
                    offsets_tmp_buffer_send.resize(end_got_candidates_queue);
#pragma omp parallel for
                    for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
                        VertexID v_id_local = got_candidates_queue[i_q];
                        VertexID v_global_id = G.get_global_vertex_id(v_id_local);
                        if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
                            // If v_global_id is root, its new labels should be put into buffer_send.
                            // Reserve space for the worst case: all of its candidates get inserted.
                            offsets_tmp_buffer_send[i_q] = short_index[v_id_local].end_candidates_que;
                        } else {
                            offsets_tmp_buffer_send[i_q] = 0;
                        }
                    }
                    total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
                    tmp_buffer_send.resize(total_send_labels);
                    sizes_tmp_buffer_send.resize(end_got_candidates_queue, 0);
                }
                catch (const std::bad_alloc &) {
                    double memtotal = 0;
                    double memfree = 0;
                    PADO::Utils::system_memory(memtotal, memfree);
                    // NOTE(review): "%u" assumes EdgeID (total_send_labels) is
                    // 32-bit unsigned — confirm against dglobals.h.
                    printf("L%u_tmp_buffer_send: bad_alloc "
                           "host_id: %d "
                           "iter: %u "
                           "end_got_candidates_queue: %u "
                           "total_send_labels: %u "
                           "L.size(): %.2fGB "
                           "memtotal: %.2fGB "
                           "memfree: %.2fGB\n",
                           __LINE__,
                           host_id,
                           iter,
                           end_got_candidates_queue,
                           total_send_labels,
                           get_index_size() * 1.0 / (1 << 30),
                           memtotal / 1024,
                           memfree / 1024);
                    exit(1);
                }
#pragma omp parallel for
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
//                                L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
//                                active_queue[end_active_queue++] = v_id_local;
                                // Thread-private slot: offset i_queue, size 0 or 1.
                                tmp_active_queue[i_queue + sizes_tmp_active_queue[i_queue]++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_para(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
                                    tmp_buffer_send,
                                    sizes_tmp_buffer_send[i_queue],
                                    offsets_tmp_buffer_send[i_queue]);
//                                    buffer_send);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
//                                L,
//                                short_index,
//                                b_id,
                                iter);
                    }
                }
                {// Collect elements from tmp_active_queue to active_queue
                    VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
                    PADO::collect_into_queue(
                            tmp_active_queue,
                            offsets_tmp_active_queue,
                            sizes_tmp_active_queue,
                            total_new,
                            active_queue,
                            end_active_queue);
                }
                {// Collect elements from tmp_buffer_send to buffer_send
                    EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
                    try {
                        buffer_send.resize(total_new);
                    }
                    catch (const std::bad_alloc &) {
                        double memtotal = 0;
                        double memfree = 0;
                        PADO::Utils::system_memory(memtotal, memfree);
                        printf("L%u_buffer_send: bad_alloc "
                               "iter: %u "
                               "host_id: %d "
                               "L.size(): %.2fGB "
                               "memtotal: %.2fGB "
                               "memfree: %.2fGB\n",
                               __LINE__,
                               iter,
                               host_id,
                               get_index_size() * 1.0 / (1 << 30),
                               memtotal / 1024,
                               memfree / 1024);
                        exit(1);
                    }
                    // buffer_send was just resized, so collection starts at 0.
                    EdgeID zero_size = 0;
                    PADO::collect_into_queue(
                            tmp_buffer_send,
                            offsets_tmp_buffer_send,
                            sizes_tmp_buffer_send,
                            total_new,
                            buffer_send,
                            zero_size);
                }
            } else {
                // Sequential path for small candidate queues: same logic as
                // above, writing directly into active_queue / buffer_send.
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
//                                L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
                                active_queue[end_active_queue++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_seq(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
//                                    dist_table,
                                    buffer_send);
//                                    iter);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
//                                L,
//                                short_index,
//                                b_id,
                                iter);
                    }
                }
            }
//            {//test
//                printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
//            }
            end_got_candidates_queue = 0; // Set the got_candidates_queue empty
            // Sync the dist_table: every host broadcasts its buffer_send in turn,
            // and everyone applies all received (root_id, cand_real_id) updates.
            for (int root = 0; root < num_hosts; ++root) {
                std::vector<std::pair<VertexID, VertexID>> buffer_recv;
                one_host_bcasts_buffer_to_buffer(root,
                                                 buffer_send,
                                                 buffer_recv);
                if (buffer_recv.empty()) {
                    continue;
                }
                EdgeID size_buffer_recv = buffer_recv.size();
                try {
                    if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                        // Get label number for every root
                        std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                        for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                            const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                            VertexID root_id = e.first;
                            // Atomic: multiple threads may count labels of the same root.
                            __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
                        }
                        // Resize the recved_dist_table for every root
#pragma omp parallel for
                        for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                            VertexID old_size = recved_dist_table[root_id].size();
                            VertexID tmp_size = sizes_recved_root_labels[root_id];
                            if (tmp_size) {
                                recved_dist_table[root_id].resize(old_size + tmp_size);
                                sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                            }
                            // If tmp_size == 0, root_id has no received labels.
//                            sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                        }
                        // Record received labels in recved_dist_table
#pragma omp parallel for
                        for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                            const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                            VertexID root_id = e.first;
                            VertexID cand_real_id = e.second;
                            dist_table[root_id][cand_real_id] = iter;
                            // Thread-safe append into the pre-resized slot range.
                            PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id],
                                             cand_real_id);
                        }
                    } else {
                        for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
                            VertexID root_id = e.first;
                            VertexID cand_real_id = e.second;
                            dist_table[root_id][cand_real_id] = iter;
                            // Record the received element, for future reset
                            recved_dist_table[root_id].push_back(cand_real_id);
                        }
                    }
                }
                catch (const std::bad_alloc &) {
                    double memtotal = 0;
                    double memfree = 0;
                    PADO::Utils::system_memory(memtotal, memfree);
                    printf("recved_dist_table: bad_alloc "
                           "host_id: %d "
                           "iter: %u "
                           "L.size(): %.2fGB "
                           "memtotal: %.2fGB "
                           "memfree: %.2fGB\n",
                           host_id,
                           iter,
                           get_index_size() * 1.0 / (1 << 30),
                           memtotal / 1024,
                           memfree / 1024);
                    exit(1);
                }
            }
            // Sync the global_num_actives.
            // MPI_MAX (not MPI_SUM): global_num_actives drives the number of
            // chunked scatter rounds above, so it must be the maximum over
            // hosts for all hosts to run the same number of rounds.
            MPI_Allreduce(&end_active_queue,
                          &global_num_actives,
                          1,
                          V_ID_Type,
                          MPI_MAX,
//                          MPI_SUM,
                          MPI_COMM_WORLD);
//            gather_time += WallTimer::get_time_mark();
        }
//        {//test
//            if (0 == host_id) {
//                printf("iter: %u inserting labels finished.\n", iter);
//            }
//        }
    }
    // Reset the dist_table
//    clearup_time -= WallTimer::get_time_mark();
    reset_at_end(
            G,
//            roots_start,
//            roots_master_local,
            dist_table,
            recved_dist_table,
            bp_labels_table,
            once_candidated_queue,
            end_once_candidated_queue);
//    clearup_time += WallTimer::get_time_mark();
//    {//test
//        if (0 == host_id) {
//            printf("host_id: %u resetting finished.\n", host_id);
//        }
//    }
}
//// Sequential Version
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//batch_process(
// const DistGraph &G,
// VertexID b_id,
// VertexID roots_start, // start id of roots
// VertexID roots_size, // how many roots in the batch
// const std::vector<uint8_t> &used_bp_roots,
// std::vector<VertexID> &active_queue,
// VertexID &end_active_queue,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<ShortIndex> &short_index,
// std::vector< std::vector<UnweightedDist> > &dist_table,
// std::vector< std::vector<VertexID> > &recved_dist_table,
// std::vector<BPLabelType> &bp_labels_table,
// std::vector<uint8_t> &got_candidates,
//// std::vector<bool> &got_candidates,
// std::vector<uint8_t> &is_active,
//// std::vector<bool> &is_active,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<uint8_t> &once_candidated)
//// std::vector<bool> &once_candidated)
//{
// // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
// initializing_time -= WallTimer::get_time_mark();
// VertexID global_num_actives = initialization(G,
// short_index,
// dist_table,
// recved_dist_table,
// bp_labels_table,
// active_queue,
// end_active_queue,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// b_id,
// roots_start,
// roots_size,
//// roots_master_local,
// used_bp_roots);
// initializing_time += WallTimer::get_time_mark();
// UnweightedDist iter = 0; // The iterator, also the distance for current iteration
//// {//test
//// printf("host_id: %u initialization finished.\n", host_id);
//// }
//
//
// while (global_num_actives) {
////#ifdef DEBUG_MESSAGES_ON
//// {//
//// if (0 == host_id) {
//// printf("iter: %u global_num_actives: %u\n", iter, global_num_actives);
//// }
//// }
////#endif
// ++iter;
// // Traverse active vertices to push their labels as candidates
// // Send masters' newly added labels to other hosts
// {
// scatter_time -= WallTimer::get_time_mark();
// std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue);
// //.first: Vertex ID
// //.second: size of labels
// std::vector<VertexID> buffer_send_labels;
// // Prepare masters' newly added labels for sending
// for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // Prepare the buffer_send_indices
// buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send_labels.push_back(label_root_id);
// }
// }
// end_active_queue = 0;
//
// for (int root = 0; root < num_hosts; ++root) {
// // Get the indices
// std::vector< std::pair<VertexID, VertexID> > indices_buffer;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send_indices,
// indices_buffer);
// if (indices_buffer.empty()) {
// continue;
// }
// // Get the labels
// std::vector<VertexID> labels_buffer;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send_labels,
// labels_buffer);
// // Push those labels
// EdgeID start_index = 0;
// for (const std::pair<VertexID, VertexID> e : indices_buffer) {
// VertexID v_head_global = e.first;
// EdgeID bound_index = start_index + e.second;
// if (G.local_out_degrees[v_head_global]) {
// local_push_labels(
// v_head_global,
// start_index,
// bound_index,
// roots_start,
// labels_buffer,
// G,
// short_index,
// got_candidates_queue,
// end_got_candidates_queue,
// got_candidates,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// bp_labels_table,
// used_bp_roots,
// iter);
// }
// start_index = bound_index;
// }
// }
// scatter_time += WallTimer::get_time_mark();
// }
//
// // Traverse vertices in the got_candidates_queue to insert labels
// {
// gather_time -= WallTimer::get_time_mark();
// std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
// // pair.first: root id
// // pair.second: label (global) id of the root
// for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
// VertexID v_id_local = got_candidates_queue[i_queue];
// VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
// got_candidates[v_id_local] = 0; // reset got_candidates
// // Traverse v_id's all candidates
// VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
// for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
// VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
// short_index[v_id_local].is_candidate[cand_root_id] = 0;
// // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
// if ( distance_query(
// cand_root_id,
// v_id_local,
// roots_start,
// // L,
// dist_table,
// iter) ) {
// if (!is_active[v_id_local]) {
// is_active[v_id_local] = 1;
// active_queue[end_active_queue++] = v_id_local;
// }
// ++inserted_count;
// // The candidate cand_root_id needs to be added into v_id's label
// insert_label_only(
// cand_root_id,
// v_id_local,
// roots_start,
// roots_size,
// G,
//// dist_table,
// buffer_send);
//// iter);
// }
// }
// short_index[v_id_local].end_candidates_que = 0;
// if (0 != inserted_count) {
// // Update other arrays in L[v_id] if new labels were inserted in this iteration
// update_label_indices(
// v_id_local,
// inserted_count,
// // L,
// short_index,
// b_id,
// iter);
// }
// }
//// {//test
//// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
//// }
// end_got_candidates_queue = 0; // Set the got_candidates_queue empty
// // Sync the dist_table
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<std::pair<VertexID, VertexID>> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
// VertexID root_id = e.first;
// VertexID cand_real_id = e.second;
// dist_table[root_id][cand_real_id] = iter;
// // Record the received element, for future reset
// recved_dist_table[root_id].push_back(cand_real_id);
// }
// }
//
// // Sync the global_num_actives
// MPI_Allreduce(&end_active_queue,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// gather_time += WallTimer::get_time_mark();
// }
// }
//
// // Reset the dist_table
// clearup_time -= WallTimer::get_time_mark();
// reset_at_end(
//// G,
//// roots_start,
//// roots_master_local,
// dist_table,
// recved_dist_table,
// bp_labels_table);
// clearup_time += WallTimer::get_time_mark();
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Every host h_i broadcast to others
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<E_T> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
//// uint64_t size_buffer_send = buffer_send.size();
//// // Sync the size_buffer_send.
//// message_time -= WallTimer::get_time_mark();
//// MPI_Bcast(&size_buffer_send,
//// 1,
//// MPI_UINT64_T,
//// root,
//// MPI_COMM_WORLD);
//// message_time += WallTimer::get_time_mark();
////// {// test
////// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
////// }
//// if (!size_buffer_send) {
//// continue;
//// }
//// message_time -= WallTimer::get_time_mark();
//// std::vector<E_T> buffer_recv(size_buffer_send);
//// if (host_id == root) {
//// buffer_recv.assign(buffer_send.begin(), buffer_send.end());
//// }
//// uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
//// if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) {
//// // Only need 1 broadcast
////
//// MPI_Bcast(buffer_recv.data(),
//// bytes_buffer_send,
//// MPI_CHAR,
//// root,
//// MPI_COMM_WORLD);
//// } else {
//// const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
//// const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
//// size_t offset = 0;
//// for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
////// size_t offset = b_i * unit_buffer_size;
//// size_t size_unit_buffer = b_i == num_unit_buffers - 1
//// ? size_buffer_send - offset
//// : unit_buffer_size;
//// MPI_Bcast(buffer_recv.data() + offset,
//// size_unit_buffer * ETypeSize,
//// MPI_CHAR,
//// root,
//// MPI_COMM_WORLD);
//// offset += unit_buffer_size;
//// }
//// }
//// message_time += WallTimer::get_time_mark();
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Host processes locally.
// for (const E_T &e : buffer_send) {
// fun(e);
// }
//
// // Every host sends to others
// for (int src = 0; src < num_hosts; ++src) {
// if (host_id == src) {
// // Send from src
// message_time -= WallTimer::get_time_mark();
// for (int hop = 1; hop < num_hosts; ++hop) {
// int dst = hop_2_root_host_id(hop, host_id);
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// }
// message_time += WallTimer::get_time_mark();
// } else {
// // Receive from src
// for (int hop = 1; hop < num_hosts; ++hop) {
// int dst = hop_2_root_host_id(hop, src);
// if (host_id == dst) {
// message_time -= WallTimer::get_time_mark();
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// }
// }
// }
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Host processes locally.
// for (const E_T &e : buffer_send) {
// fun(e);
// }
// // Every host sends (num_hosts - 1) times
// for (int hop = 1; hop < num_hosts; ++hop) {
// int src = hop_2_me_host_id(-hop);
// int dst = hop_2_me_host_id(hop);
// if (src != dst) { // Normal case
// // When host_id is odd, first receive, then send.
// if (static_cast<uint32_t>(host_id) & 1U) {
// message_time -= WallTimer::get_time_mark();
// // Receive first.
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u recved_from: %u\n", host_id, src);
// }
// // Send then.
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u send_to: %u\n", host_id, dst);
// }
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// } else { // When host_id is even, first send, then receive.
// // Send first.
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u send_to: %u\n", host_id, dst);
// }
// // Receive then.
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u recved_from: %u\n", host_id, src);
// }
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// } else { // If host_id is higher than dst, first send, then receive
// // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2.
// if (host_id < dst) {
// // Send
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// // Receive
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// } else { // Otherwise, if host_id is lower than dst, first receive, then send
// // Receive
// message_time -= WallTimer::get_time_mark();
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// // Send
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// }
// }
//}
//// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// const uint32_t UNIT_BUFFER_SIZE = 16U << 20U;
// // Every host h_i broadcast to others
// for (int h_i = 0; h_i < num_hosts; ++h_i) {
// uint64_t size_buffer_send = buffer_send.size();
// // Sync the size_buffer_send.
// message_time -= WallTimer::get_time_mark();
// MPI_Bcast(&size_buffer_send,
// 1,
// MPI_UINT64_T,
// h_i,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
//// {// test
//// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
//// }
// if (!size_buffer_send) {
// continue;
// }
// uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE;
//
// // Broadcast the buffer_send
// for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
// // Prepare the unit buffer
// message_time -= WallTimer::get_time_mark();
// size_t offset = b_i * UNIT_BUFFER_SIZE;
// size_t size_unit_buffer = b_i == num_unit_buffers - 1
// ? size_buffer_send - offset
// : UNIT_BUFFER_SIZE;
// std::vector<E_T> unit_buffer(size_unit_buffer);
// // Copy the messages from buffer_send to unit buffer.
// if (host_id == h_i) {
// unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer);
// }
// // Broadcast the unit buffer
// MPI_Bcast(unit_buffer.data(),
// MPI_Instance::get_sending_size(unit_buffer),
// MPI_CHAR,
// h_i,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// // Process every element of unit_buffer
// for (const E_T &e : unit_buffer) {
// fun(e);
// }
// }
// }
//}
// Function: Host root broadcasts its sending buffer to a receiving buffer.
// Collective: host `root` broadcasts `buffer_send` to every host; afterwards
// every host (including root) holds the data in `buffer_recv`.
// Steps:
//   1. Broadcast the element count so receivers can size their buffers.
//   2. Resize buffer_recv (printing a memory diagnostic and aborting on OOM).
//   3. Broadcast the payload, split into chunks of at most INT_MAX bytes,
//      because MPI_Bcast takes its count as a (signed) int.
// NOTE(review): on the root host the payload is moved into buffer_recv via
// swap(), so the caller's buffer_send is clobbered (it keeps its size but
// afterwards holds the value-initialized elements from the resize) — callers
// must not rely on its contents after this call.
template <VertexID BATCH_SIZE>
template <typename E_T>
inline void DistBVCPLL<BATCH_SIZE>::
one_host_bcasts_buffer_to_buffer(
        int root,
        std::vector<E_T> &buffer_send,
        std::vector<E_T> &buffer_recv)
{
    const size_t ETypeSize = sizeof(E_T);
    uint64_t size_buffer_send = buffer_send.size();
    // Sync the size_buffer_send: only root's value matters, it overwrites
    // the local value on every other host.
//    message_time -= WallTimer::get_time_mark();
    MPI_Bcast(&size_buffer_send,
            1,
            MPI_UINT64_T,
            root,
            MPI_COMM_WORLD);
//    message_time += WallTimer::get_time_mark();
    try {
        buffer_recv.resize(size_buffer_send);
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report the label-index size and system memory
        // state before exiting, to help diagnose which host ran out.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("one_host_bcasts_buffer_to_buffer: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
                host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    if (!size_buffer_send) {
        // Nothing to broadcast; all hosts return with an empty buffer_recv.
        return;
    }
    // Broadcast the buffer_send
//    message_time -= WallTimer::get_time_mark();
    if (host_id == root) {
        // Move (not copy) the payload into the send position of the bcast.
        // This empties the meaningful content of the caller's buffer_send.
//        buffer_recv.assign(buffer_send.begin(), buffer_send.end());
        buffer_recv.swap(buffer_send);
    }
    uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
    if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) {
        // Only need 1 broadcast
        MPI_Bcast(buffer_recv.data(),
                bytes_buffer_send,
                MPI_CHAR,
                root,
                MPI_COMM_WORLD);
    } else {
        // Payload exceeds MPI's int count limit: split into equal-sized
        // chunks (the last chunk takes whatever remains).
        const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
        const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
        size_t offset = 0;
        for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
            size_t size_unit_buffer = b_i == num_unit_buffers - 1
                    ? size_buffer_send - offset
                    : unit_buffer_size;
            MPI_Bcast(buffer_recv.data() + offset,
                    size_unit_buffer * ETypeSize,
                    MPI_CHAR,
                    root,
                    MPI_COMM_WORLD);
            offset += unit_buffer_size;
        }
    }
//    message_time += WallTimer::get_time_mark();
}
}
#endif //PADO_DPADO_H
|
stencil_opt2.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "malloc2D.h"
#include "timer.h"
#define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp)
int main(int argc, char *argv[])
{
   /* Report the OpenMP thread count once (only thread 0 prints). */
#pragma omp parallel
   if (omp_get_thread_num() == 0) printf("Running with %d thread(s)\n",omp_get_num_threads());

   struct timespec tstart_init, tstart_flush, tstart_stencil, tstart_total;
   /* The timing accumulators are only ever updated with "+=", so they must
      start at zero; reading them uninitialized is undefined behavior. */
   double init_time = 0.0, flush_time = 0.0, stencil_time = 0.0, total_time = 0.0;

   int imax=2002, jmax = 2002;
   double** xtmp;
   /* 2D arrays from malloc2D.h; each is released with a single free(). */
   double** x = malloc2D(jmax, imax);
   double** xnew = malloc2D(jmax, imax);
   /* Scratch buffer, sized to be larger than the caches we want to flush. */
   int *flush = (int *)malloc(jmax*imax*sizeof(int)*4);

   cpu_timer_start(&tstart_total);
   cpu_timer_start(&tstart_init);

   /* Initialize the domain: x holds the data, xnew is the write target. */
#pragma omp parallel for
   for (int j = 0; j < jmax; j++){
      for (int i = 0; i < imax; i++){
         xnew[j][i] = 0.0;
         x[j][i] = 5.0;
      }
   }

   /* Plant a hot patch in the middle of the domain. */
#pragma omp parallel for
   for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){
      for (int i = imax/2 - 5; i < imax/2 -1; i++){
         x[j][i] = 400.0;
      }
   }
   init_time += cpu_timer_stop(tstart_init);

   for (int iter = 0; iter < 10000; iter++){
      cpu_timer_start(&tstart_flush);
      /* Touch the whole scratch buffer so every stencil sweep starts with
         cold caches. Start at 0 (the original skipped element 0) and store
         an int constant rather than the double 1.0. */
#pragma omp parallel for
      for (int l = 0; l < jmax*imax*4; l++){
         flush[l] = 1;
      }
      flush_time += cpu_timer_stop(tstart_flush);

      cpu_timer_start(&tstart_stencil);
      /* 5-point stencil average over the interior points. */
#pragma omp parallel for
      for (int j = 1; j < jmax-1; j++){
         for (int i = 1; i < imax-1; i++){
            xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0;
         }
      }
      stencil_time += cpu_timer_stop(tstart_stencil);

      SWAP_PTR(xnew, x, xtmp);
      if (iter%1000 == 0) printf("Iter %d\n",iter);
   }
   total_time += cpu_timer_stop(tstart_total);

   printf("Timing is init %f flush %f stencil %f total %f\n",
          init_time,flush_time,stencil_time,total_time);

   free(x);
   free(xnew);
   free(flush);
   return 0;
}
|
imd_forces_eam2.c |
/******************************************************************************
*
* IMD -- The ITAP Molecular Dynamics Program
*
* Copyright 1996-2004 Institute for Theoretical and Applied Physics,
* University of Stuttgart, D-70550 Stuttgart
*
******************************************************************************/
/******************************************************************************
*
* do_forces for ASYMPOT, and second force loop for EAM2
*
******************************************************************************/
/******************************************************************************
* $Revision$
* $Date$
******************************************************************************/
#include "imd.h"
#include "potaccess.h"
/******************************************************************************
*
* special version of do_forces for asymmetric core potentials
*
******************************************************************************/
#ifdef ASYMPOT
/* Pair-force computation for ASYMmetric core POTentials plus the first EAM
 * pass: for every atom pair (i in cell p, j in cell q) the pair potential is
 * looked up in both table columns (col1 = p_typ*ntypes+q_typ for the force
 * on i, col2 = q_typ*ntypes+p_typ for the force on j), and the host electron
 * density rho is accumulated for both atoms in the same sweep.
 * pbc is the periodic-image shift applied to cell p's coordinates.
 * Accumulates into *Epot and *Virial; the Vir_* components are unused here. */
void do_forces(cell *p, cell *q, vektor pbc, real *Epot, real *Virial,
               real *Vir_xx, real *Vir_yy, real *Vir_zz,
               real *Vir_yz, real *Vir_zx, real *Vir_xy)
{
  int i,j,k;
  vektor d;
  vektor tmp_d;
  vektor force;
  real r2, rho_h;
  real tmp_virial;
  real pot_zwi, pot_grad;
  int col1, col2, is_short=0, inc = ntypes * ntypes;
  int jstart, q_typ, p_typ;
  real *qptr, *pfptr, *qfptr, *qpdptr, *ppdptr, *qpoptr, *ppoptr;
  tmp_virial = 0.0;
  /* for each atom in first cell */
  for (i=0; i<p->n; ++i) {
    /* position of atom i, shifted by the periodic-boundary vector */
    tmp_d.x = ORT(p,i,X) - pbc.x;
    tmp_d.y = ORT(p,i,Y) - pbc.y;
    tmp_d.z = ORT(p,i,Z) - pbc.z;
    p_typ = SORTE(p,i);
    /* when p and q are the same (unshifted) cell, start at j = i+1 so each
       pair is visited exactly once */
    jstart = (((p==q) && (pbc.x==0) && (pbc.y==0) && (pbc.z==0)) ? i+1 : 0);
    qptr = &ORT(q,jstart,X);
    /* for each atom in neighbouring cell */
    for (j = jstart; j < q->n; ++j) {
      /* calculate distance */
      d.x = *qptr - tmp_d.x; ++qptr;
      d.y = *qptr - tmp_d.y; ++qptr;
      d.z = *qptr - tmp_d.z; ++qptr;
      q_typ = SORTE(q,j);
      /* the two (asymmetric) potential-table columns for this type pair */
      col1 = p_typ * ntypes + q_typ;
      col2 = q_typ * ntypes + p_typ;
      r2 = SPROD(d,d);
#ifdef DEBUG
      if (0==r2) { char msgbuf[256];
        sprintf(msgbuf, "Distance is zero between particles %d and %d!\n",
                NUMMER(p,i), NUMMER(q,j));
        error(msgbuf);
      }
#endif
      /* compute pair interactions, first on particle i */
      if (r2 <= pair_pot.end[col1]) {
        /* pot_zwi: potential energy; pot_grad: its gradient factor.
           is_short is set by the macro if r2 underflows the table. */
        PAIR_INT(pot_zwi, pot_grad, pair_pot, col1, inc, r2, is_short)
        /* store force in temporary variable */
        force.x = d.x * pot_grad;
        force.y = d.y * pot_grad;
        force.z = d.z * pot_grad;
        /* accumulate forces */
        pfptr = &KRAFT(p,i,X);
        *pfptr += force.x;
        *(++pfptr) += force.y;
        *(++pfptr) += force.z;
        /* the first half of the pot. energy of this bond */
        pot_zwi *= 0.5;
        *Epot += pot_zwi;
        POTENG(p,i) += pot_zwi;
        /* for the virial, we take the mean forces on the two particles */
        force.x *= 0.5;
        force.y *= 0.5;
        force.z *= 0.5;
        pot_grad *= 0.5;
        tmp_virial -= r2 * pot_grad;
#ifdef STRESS_TENS
        if (do_press_calc) {
          /* per-atom stress tensor contribution (with the halved force) */
          PRESSTENS(p,i,xx) -= d.x * force.x;
          PRESSTENS(p,i,yy) -= d.y * force.y;
          PRESSTENS(p,i,zz) -= d.z * force.z;
          PRESSTENS(p,i,yz) -= d.y * force.z;
          PRESSTENS(p,i,zx) -= d.z * force.x;
          PRESSTENS(p,i,xy) -= d.x * force.y;
        }
#endif
      }
      /* compute pair interactions, now on particle j */
      if (r2 <= pair_pot.end[col2]) {
        /* reuse pot_zwi/pot_grad from col1 when both columns coincide */
        if (col1!=col2) {
          PAIR_INT(pot_zwi, pot_grad, pair_pot, col2, inc, r2, is_short);
        }
        /* store force in temporary variable */
        force.x = d.x * pot_grad;
        force.y = d.y * pot_grad;
        force.z = d.z * pot_grad;
        /* accumulate forces (opposite sign: reaction force on j) */
        qfptr = &KRAFT(q,j,X);
        *qfptr -= force.x;
        *(++qfptr) -= force.y;
        *(++qfptr) -= force.z;
        /* the second half of the pot. energy of this bond */
        pot_zwi *= 0.5;
        *Epot += pot_zwi;
        POTENG(q,j) += pot_zwi;
        /* for the virial, we take the mean forces on the two particles */
        force.x *= 0.5;
        force.y *= 0.5;
        force.z *= 0.5;
        pot_grad *= 0.5;
        tmp_virial -= r2 * pot_grad;
#ifdef STRESS_TENS
        if (do_press_calc) {
          PRESSTENS(q,j,xx) -= d.x * force.x;
          PRESSTENS(q,j,yy) -= d.y * force.y;
          PRESSTENS(q,j,zz) -= d.z * force.z;
          PRESSTENS(q,j,yz) -= d.y * force.z;
          PRESSTENS(q,j,zx) -= d.z * force.x;
          PRESSTENS(q,j,xy) -= d.x * force.y;
        }
#endif
      }
      /* compute host electron density (first EAM pass) for atom i ... */
      if (r2 < rho_h_tab.end[col1]) {
        VAL_FUNC(rho_h, rho_h_tab, col1, inc, r2, is_short);
        EAM_RHO(p,i) += rho_h;
#ifdef EEAM
        /* extended EAM also accumulates the squared density */
        EAM_P(p,i) += rho_h*rho_h;
#endif
      }
      /* ... and for atom j (reusing rho_h when the columns coincide) */
      if (r2 < rho_h_tab.end[col2]) {
        if (col1!=col2) {
          VAL_FUNC(rho_h, rho_h_tab, col2, inc, r2, is_short);
        }
        EAM_RHO(q,j) += rho_h;
#ifdef EEAM
        EAM_P(q,j) += rho_h*rho_h;
#endif
      }
    } /* for j */
  } /* for i */
  /* warn once per call if any pair fell below the tabulated range */
  if (is_short==1)
  {
    printf("\nproc:%d,steps:%d,Short distance\n",myid,steps);
  }
  *Virial += tmp_virial;
}
#endif /* ASYMPOT */
/******************************************************************************
*
* compute embedding energy and its derivative for all atoms
*
******************************************************************************/
/* Compute the EAM embedding energy F(rho) and its derivative dF/drho for
 * every atom of every cell, adding the energy to the per-atom potential
 * energy and to the global tot_pot_energy (an OpenMP reduction). */
void do_embedding_energy(void)
{
  int cell_i;
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) reduction(+:tot_pot_energy)
#endif
  for (cell_i=0; cell_i<NCELLS; cell_i++) {
    int atom_i, dummy=0;
    real epot;
    cell *cp = CELLPTR(cell_i);
    for (atom_i=0; atom_i<cp->n; atom_i++) {
      /* embedding energy and its derivative from the host density rho */
      PAIR_INT( epot, EAM_DF(cp,atom_i), embed_pot, SORTE(cp,atom_i),
                ntypes, EAM_RHO(cp,atom_i), dummy);
      POTENG(cp,atom_i) += epot;
      tot_pot_energy += epot;
#ifdef EEAM
      /* extended-EAM modification term, driven by the squared density */
      PAIR_INT( epot, EAM_DM(cp,atom_i), emod_pot, SORTE(cp,atom_i),
                ntypes, EAM_P(cp,atom_i), dummy);
      POTENG(cp,atom_i) += epot;
      tot_pot_energy += epot;
#endif
    }
  }
}
/******************************************************************************
*
* second force loop, calculates the force and the energy
* caused by the embedding electron density
* uses Phi(r2), Rho(r2), F(rho) and its derivatives
* also used for EEAM
*
******************************************************************************/
void do_forces_eam2(cell *p, cell *q, vektor pbc, real *Virial,
                    real *Vir_xx, real *Vir_yy, real *Vir_zz,
                    real *Vir_yz, real *Vir_zx, real *Vir_xy)
{
  int i,j,k,same_cell;
  vektor d, tmp_d, force;
  real r2;
  int is_short=0, idummy=0;
  int jstart, q_typ, p_typ;
  int col1, col2, inc=ntypes*ntypes;
  real *qptr, *pfptr, *qfptr, *qpdptr, *ppdptr, *qpoptr, *ppoptr;
  real tmp_virial=0.0;
  real eam2_force, rho_i_strich, rho_j_strich;
#ifdef EEAM
  real rho_i, rho_j;
#endif
  /* for each atom in first cell */
  for (i=0; i<p->n; ++i) {
    /* position of atom i, shifted by the periodic-boundary vector */
    tmp_d.x = ORT(p,i,X) - pbc.x;
    tmp_d.y = ORT(p,i,Y) - pbc.y;
    tmp_d.z = ORT(p,i,Z) - pbc.z;
    p_typ = SORTE(p,i);
    /* visit each pair only once when p and q are the same unshifted cell */
    same_cell = ((p==q) && (pbc.x==0) && (pbc.y==0) && (pbc.z==0));
    jstart = (same_cell ? i+1 : 0);
    qptr = &ORT(q,jstart,X);
    /* for each atom in neighbouring cell */
    for (j=jstart; j<q->n; ++j) {
      /* calculate distance */
      d.x = *qptr - tmp_d.x; ++qptr;
      d.y = *qptr - tmp_d.y; ++qptr;
      d.z = *qptr - tmp_d.z; ++qptr;
      q_typ = SORTE(q,j);
      r2 = SPROD(d,d);
      /* col1: density atom i receives from j; col2: the reverse direction */
      col1 = q_typ * ntypes + p_typ;
      col2 = p_typ * ntypes + q_typ;
      if ((r2 < rho_h_tab.end[col1]) || (r2 < rho_h_tab.end[col2])) {
        /* take care: particle i gets its rho from particle j.
           This is tabulated in column p_typ*ntypes+q_typ.
           Here we need the giving part from column q_typ*ntypes+p_typ.
        */
        /* rho_strich_i(r_ij) */
#ifndef EEAM
        DERIV_FUNC(rho_i_strich, rho_h_tab, col1, inc, r2, is_short);
#else
        /* rho_strich_i(r_ij) and rho_i(r_ij) */
        PAIR_INT(rho_i, rho_i_strich, rho_h_tab, col1, inc, r2, is_short);
#endif
        /* rho_strich_j(r_ij) */
        if (col1==col2) {
          /* same table column in both directions: reuse the i values */
          rho_j_strich = rho_i_strich;
#ifdef EEAM
          rho_j = rho_i;
#endif
        } else {
#ifndef EEAM
          DERIV_FUNC(rho_j_strich, rho_h_tab, col2, inc, r2, is_short);
#else
          PAIR_INT(rho_j, rho_j_strich, rho_h_tab, col2, inc, r2, is_short);
#endif
        }
        /* put together (dF_i and dF_j are by 0.5 too big) */
        eam2_force = 0.5 * (EAM_DF(p,i)*rho_j_strich+EAM_DF(q,j)*rho_i_strich);
#ifdef EEAM
        /* 0.5 times 2 from derivative simplified to 1 */
        eam2_force += (EAM_DM(p,i) * rho_j * rho_j_strich +
                       + EAM_DM(q,j) * rho_i * rho_i_strich);
#endif
        /* store force in temporary variable */
        force.x = d.x * eam2_force;
        force.y = d.y * eam2_force;
        force.z = d.z * eam2_force;
        /* accumulate forces: action on i, equal and opposite reaction on j */
        pfptr = &KRAFT(p,i,X);
        qfptr = &KRAFT(q,j,X);
        *pfptr += force.x;
        *qfptr -= force.x;
        *(++pfptr) += force.y;
        *(++qfptr) -= force.y;
        *(++pfptr) += force.z;
        *(++qfptr) -= force.z;
        tmp_virial -= r2 * eam2_force;
#ifdef STRESS_TENS
        if (do_press_calc) {
          /* avoid double counting of the virial */
          force.x *= 0.5;
          force.y *= 0.5;
          force.z *= 0.5;
          PRESSTENS(p,i,xx) -= d.x * force.x;
          PRESSTENS(p,i,yy) -= d.y * force.y;
          PRESSTENS(p,i,zz) -= d.z * force.z;
          PRESSTENS(p,i,yz) -= d.y * force.z;
          PRESSTENS(p,i,zx) -= d.z * force.x;
          PRESSTENS(p,i,xy) -= d.x * force.y;
          PRESSTENS(q,j,xx) -= d.x * force.x;
          PRESSTENS(q,j,yy) -= d.y * force.y;
          PRESSTENS(q,j,zz) -= d.z * force.z;
          PRESSTENS(q,j,yz) -= d.y * force.z;
          PRESSTENS(q,j,zx) -= d.z * force.x;
          PRESSTENS(q,j,xy) -= d.x * force.y;
        }
#endif
      } /* if in the cutoff range */
    } /* for j */
  } /* for i */
  /* print warning if short distance occurred */
  /*MY MOD: Hier stand fprintf(stderr,...) */
  if (is_short==1) printf("\nproc:%d,steps:%d,Short distance!\n",myid,steps);
  *Virial += tmp_virial;
} /* do_forces_eam2 */
|
trsm_x_bsr_n_lo_col.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>
/* Block-sparse triangular solve (trsm) with multiple right-hand sides:
 * forward substitution on the lower triangle of BSR matrix A, solving
 * A * y = alpha * x column by column of the dense matrices x / y.
 * Requires column-major blocks; the diagonal block of every block row must
 * be present (checked below). ldx / ldy are the leading dimensions of x / y.
 * Parallelized over RHS columns, so each column's solve is independent. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT num_thread = alpha_get_thread_num();
    const ALPHA_INT bs = A->block_size;
    /* diag[br*bs + r] caches the r-th diagonal entry of block row br's
       diagonal block, so the substitution loop can divide without
       re-locating the diagonal block. */
    ALPHA_Number* diag=(ALPHA_Number*) alpha_malloc(A->rows*bs*sizeof(ALPHA_Number));
    const ALPHA_INT m = A->rows*bs;
    const ALPHA_INT n = A->cols*bs;
    memset(diag, '\0', m * sizeof(ALPHA_Number));
    const ALPHA_INT bs2 = bs * bs;
    const ALPHA_INT b_rows = m / bs;
    const ALPHA_INT b_cols = n / bs;
    const alphasparse_layout_t block_layout = A->block_layout;
    if(block_layout != ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
    {
        printf("layout not consistent!!!\n");
        exit(-1);
    }
    /* Pass 1: extract the diagonal entries of every diagonal block.
       In a column-major bs x bs block, entry (r,r) sits at offset r*(bs+1). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT br = 0 ; br < b_rows; br++){
        for(ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++){
            ALPHA_INT bc = A->col_indx[ai];
            if(bc == br){
                for(ALPHA_INT b_row = 0 ; b_row < bs ; b_row++){
                    diag[index2(br,b_row,bs)] = A->values[ai * bs2 + b_row *(bs + 1)];
                }
            }
        }
    }
    /* Pass 2: forward substitution, one RHS column per (parallel) iteration. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        /* temp accumulates the already-solved contributions (L*y) for the
           current block row; allocated per thread/column. */
        ALPHA_Number* temp = (ALPHA_Number*) alpha_malloc(bs*sizeof(ALPHA_Number));
        const ALPHA_INT y0_offset = out_y_col * ldy;
        const ALPHA_INT x0_offset = out_y_col * ldx;
        for (ALPHA_INT br = 0; br < b_rows; br++)
        {
            for(ALPHA_INT i = 0 ; i < bs ; i++){
                alpha_setzero(temp[i]);
            }
            ALPHA_INT diagBlock = -1;
            /* accumulate strictly-lower blocks (bc < br) times the already
               computed y entries; remember where the diagonal block lives */
            for (ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++)
            {
                ALPHA_INT bc = A->col_indx[ai];
                if(bc < br)
                //col-major
                for(ALPHA_INT col = 0; col < bs; col++)
                {
                    //all entities belongs to upper triangle
                    ALPHA_INT y_offset = y0_offset + bc * bs + col;
                    ALPHA_INT a0_offset = ai * bs2 + col * bs;
                    for(ALPHA_INT row = 0 ; row < bs ; row++)
                    {
                        ALPHA_INT ele_offset = a0_offset + row;
                        alpha_madde(temp[row], A->values[ ele_offset ] ,y[y_offset]);
                    }
                }
                //diagonal must be none-zero block
                if( bc==br ){
                    diagBlock = ai;
                }
            }
            if(diagBlock == -1)
            {
                /* a lower-triangular solve is impossible without a diagonal block */
                printf("lhs matrix invalid for trsm!!!\n");
                exit(-1);
            }
            //col-major
            //top-left most
            /* solve the bs x bs diagonal block by forward substitution:
               y[col] = (alpha*x[col] - temp[col]) / diag[col], then push the
               sub-diagonal column entries of the block into temp. */
            for(ALPHA_INT col = 0; col < bs; col++)
            {
                //upper triangle of block
                ALPHA_Number t;
                alpha_setzero(t);
                alpha_mul(t,alpha,x[x0_offset + br * bs + col]);
                alpha_sub(t,t,temp[col]);
                alpha_div(y[y0_offset + br * bs + col],t,diag[col + br * bs]);
                for(ALPHA_INT row = col + 1; row < bs; row++){
                    alpha_madde(temp[row], A->values[ diagBlock * bs2 + col * bs + row],y[y0_offset + br * bs + col ]);
                }
            }
        }
        alpha_free(temp);
    }
    alpha_free(diag);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
DRB010-lastprivatemissing-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This loop has loop-carried output-dependence due to x=... at line 63.
The problem can be solved by using lastprivate(x) .
Data race pair: x@63:5 vs. x@63:5
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* DataRaceBench microbenchmark: a loop whose only body statement writes
 * the shared variable x, read after the loop. */
int main(int argc,char *argv[])
{
  int i;
  int x;            /* written by every iteration; printed after the loop */
  int len = 10000;  /* iteration count, overridable via argv[1] */

  if (argc > 1)
    len = atoi(argv[1]);

  /* NOTE(review): the file-header comment describes a race caused by a
     MISSING lastprivate(x), but this pragma already carries lastprivate(x),
     which makes the post-loop printf well-defined (x == len-1). Confirm
     against the upstream DataRaceBench version whether the clause is
     intended here — do not "fix" either side without checking. */
#pragma omp parallel for private (i) lastprivate (x) firstprivate (len)
  for (i = 0; i <= len - 1; i += 1) {
    x = i;
  }
  printf("x=%d",x);
  return 0;
}
|
OnDiscMSExperiment.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2020.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#pragma once
#include <OpenMS/INTERFACES/DataStructures.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/METADATA/ExperimentalSettings.h>
#include <OpenMS/FORMAT/HANDLERS/IndexedMzMLHandler.h>
#include <vector>
#include <algorithm>
#include <limits>
#include <boost/shared_ptr.hpp>
namespace OpenMS
{
/**
@brief Representation of a mass spectrometry experiment on disk.
@ingroup Kernel
@note This implementation is @a not thread-safe since it keeps internally a
single file access pointer which it moves when accessing a specific
data item. Please provide a separate copy to each thread, e.g.
@code
#pragma omp parallel for firstprivate(ondisc_map)
@endcode
*/
class OPENMS_DLLAPI OnDiscMSExperiment
{
typedef ChromatogramPeak ChromatogramPeakT;
typedef Peak1D PeakT;
public:
/**
@brief Constructor
This initializes the object, use openFile to open a file.
*/
OnDiscMSExperiment() {}
/**
@brief Open a specific file on disk.
This tries to read the indexed mzML by parsing the index and then reading
the meta information into memory.
@return Whether the parsing of the file was successful (if false, the
file most likely was not an indexed mzML file)
*/
bool openFile(const String& filename, bool skipMetaData = false)
{
filename_ = filename;
indexed_mzml_file_.openFile(filename);
// An empty filename is tolerated; meta-data is only loaded for a real
// file and only when the caller did not ask to skip it.
if (filename != "" && !skipMetaData)
{
loadMetaData_(filename);
}
return indexed_mzml_file_.getParsingSuccess();
}
/// Copy constructor
/// Note: meta_ms_experiment_ is a shared_ptr, so the copy SHARES the
/// in-memory meta-data with the source object (shallow copy).
OnDiscMSExperiment(const OnDiscMSExperiment& source) :
filename_(source.filename_),
indexed_mzml_file_(source.indexed_mzml_file_),
meta_ms_experiment_(source.meta_ms_experiment_)
{
}
/**
@brief Equality operator
This only checks whether the underlying file is the same and the parsed
meta-information is the same. Note that the file reader (e.g. the
std::ifstream of the file) might be in a different state.
*/
bool operator==(const OnDiscMSExperiment& rhs) const
{
// If either side has no meta-data loaded, fall back to comparing the
// shared_ptr values themselves (equal only if both are null or identical).
if (meta_ms_experiment_ == nullptr || rhs.meta_ms_experiment_ == nullptr)
{
return filename_ == rhs.filename_ &&
meta_ms_experiment_ == rhs.meta_ms_experiment_;
}
// check if file and meta information is the same
return filename_ == rhs.filename_ &&
(*meta_ms_experiment_) == (*rhs.meta_ms_experiment_);
// do not check if indexed_mzml_file_ is equal -> they have the same filename...
}
/// Inequality operator
bool operator!=(const OnDiscMSExperiment& rhs) const
{
return !(operator==(rhs));
}
/**
@brief Checks if all spectra are sorted with respect to ascending RT
Note that we cannot check whether all spectra are sorted (except if we
were to load them all and check).
*/
bool isSortedByRT() const
{
// Without meta-data there is nothing to check against.
if (!meta_ms_experiment_) return false;
return meta_ms_experiment_->isSorted(false);
}
/// alias for getNrSpectra
inline Size size() const
{
return getNrSpectra();
}
/// returns whether spectra are empty
inline bool empty() const
{
return getNrSpectra() == 0;
}
/// get the total number of spectra available
inline Size getNrSpectra() const
{
return indexed_mzml_file_.getNrSpectra();
}
/// get the total number of chromatograms available
inline Size getNrChromatograms() const
{
return indexed_mzml_file_.getNrChromatograms();
}
/// returns the meta information of this experiment (const access)
boost::shared_ptr<const ExperimentalSettings> getExperimentalSettings() const
{
return boost::static_pointer_cast<const ExperimentalSettings>(meta_ms_experiment_);
}
/// returns the shared in-memory meta-data experiment (may be null if
/// openFile was called with skipMetaData = true)
boost::shared_ptr<PeakMap> getMetaData() const
{
return meta_ms_experiment_;
}
/// alias for getSpectrum
inline MSSpectrum operator[](Size n)
{
return getSpectrum(n);
}
/**
@brief returns a single spectrum
@param id The index of the spectrum
*/
MSSpectrum getSpectrum(Size id)
{
// Without meta-data, read the raw spectrum directly from the indexed file.
if (!meta_ms_experiment_) return indexed_mzml_file_.getMSSpectrumById(int(id));
// Otherwise seed the result with the in-memory meta information, then
// fill in the peak data from disk.
MSSpectrum spectrum(meta_ms_experiment_->operator[](id));
indexed_mzml_file_.getMSSpectrumById(int(id), spectrum);
return spectrum;
}
/**
@brief returns a single spectrum
*/
OpenMS::Interfaces::SpectrumPtr getSpectrumById(Size id)
{
return indexed_mzml_file_.getSpectrumById((int)id);
}
/**
@brief returns a single chromatogram
@param id The index of the chromatogram
*/
MSChromatogram getChromatogram(Size id)
{
// Same fallback logic as getSpectrum: raw read when no meta-data is loaded.
if (!meta_ms_experiment_) return indexed_mzml_file_.getMSChromatogramById(int(id));
MSChromatogram chromatogram(meta_ms_experiment_->getChromatogram(id));
indexed_mzml_file_.getMSChromatogramById(int(id), chromatogram);
return chromatogram;
}
/**
@brief returns a single chromatogram
@param id The native identifier of the chromatogram
*/
MSChromatogram getChromatogramByNativeId(const std::string& id);
/**
@brief returns a single spectrum
@param id The native identifier of the spectrum
*/
MSSpectrum getSpectrumByNativeId(const std::string& id);
/**
@brief returns a single chromatogram
*/
OpenMS::Interfaces::ChromatogramPtr getChromatogramById(Size id)
{
return indexed_mzml_file_.getChromatogramById(id);
}
/// sets whether to skip some XML checks and be fast instead
void setSkipXMLChecks(bool skip)
{
indexed_mzml_file_.setSkipXMLChecks(skip);
}
private:
/// Private Assignment operator -> we cannot copy file streams in IndexedMzMLHandler
OnDiscMSExperiment& operator=(const OnDiscMSExperiment& /* source */);
void loadMetaData_(const String& filename);
MSChromatogram getMetaChromatogramById_(const std::string& id);
MSSpectrum getMetaSpectrumById_(const std::string& id);
protected:
/// The filename of the underlying data file
String filename_;
/// The index of the underlying data file
Internal::IndexedMzMLHandler indexed_mzml_file_;
/// The meta-data
boost::shared_ptr<PeakMap> meta_ms_experiment_;
/// Mapping of chromatogram native ids to offsets
std::unordered_map< std::string, Size > chromatograms_native_ids_;
/// Mapping of spectra native ids to offsets
std::unordered_map< std::string, Size > spectra_native_ids_;
};
typedef OpenMS::OnDiscMSExperiment OnDiscPeakMap;
} // namespace OpenMS
|
pre_processing.h | #pragma once
#include "util/primitives/primitives.h"
#include "util/graph/graph.h"
#ifndef NO_ATOMIC
#include "util/ips4o/ips4o.hpp"
#endif
// Normalizes an undirected edge list (each edge stored as (min, max)),
// sorts it, removes self-loops and duplicate edges in parallel, updates
// num_edges in place, and returns the maximum vertex id encountered.
// Both edge_lst and edge_lst_buffer are used as ping-pong buffers and may
// be swapped on return.
template<typename T, typename OFF>
T RemoveDuplicates(pair<T, T> *&edge_lst, OFF &num_edges, pair<T, T> *&edge_lst_buffer) {
using Edge = pair<T, T>;
Timer timer;
T max_node_id = 0;
T num_buckets;
auto max_omp_threads = omp_get_max_threads();
OFF *bucket_ptrs;
OFF *cur_write_off;
vector<OFF> histogram;
#pragma omp parallel num_threads(max_omp_threads)
{
// Canonicalize each edge to (small, large) while tracking the max id.
#pragma omp for reduction(max: max_node_id)
for (OFF i = 0u; i < num_edges; i++) {
if (edge_lst[i].first > edge_lst[i].second) {
swap(edge_lst[i].first, edge_lst[i].second);
}
max_node_id = max(max_node_id, max(edge_lst[i].first, edge_lst[i].second));
}
#pragma omp single
{
num_buckets = max_node_id + 1;
}
// Partition.
// Bucket by source vertex: result lands in edge_lst_buffer.
BucketSort(histogram, edge_lst, edge_lst_buffer, cur_write_off, bucket_ptrs, num_edges, num_buckets,
[&edge_lst](size_t i) {
return edge_lst[i].first;
}, &timer);
// Sort.
// Within each source bucket, order by destination -> fully sorted list.
#pragma omp for schedule(dynamic, 600)
for (auto i = 0; i < num_buckets; i++) {
sort(edge_lst_buffer + bucket_ptrs[i], edge_lst_buffer + bucket_ptrs[i + 1],
[](const Edge &left, const Edge &right) {
return left.second < right.second;
});
}
}
// After this swap edge_lst holds the sorted edges; edge_lst_buffer is scratch.
swap(edge_lst, edge_lst_buffer);
free(cur_write_off);
free(bucket_ptrs);
log_info("Finish Sort, %.9lfs", timer.elapsed());
// Selection.
auto *relative_off = (OFF *) malloc(sizeof(OFF) * num_edges);
#pragma omp parallel num_threads(max_omp_threads)
{
// Drop self-loops and consecutive duplicates (valid because the list is
// sorted). NOTE(review): assumes SelectNotFOMP(histogram, out, in, ...)
// writes survivors into edge_lst_buffer reading edge_lst -- confirm against
// the primitive's signature.
SelectNotFOMP(histogram, edge_lst_buffer, edge_lst, relative_off, num_edges, [edge_lst](size_t it) {
return edge_lst[it].first == edge_lst[it].second || (it > 0 && edge_lst[it - 1] == edge_lst[it]);
});
}
swap(edge_lst, edge_lst_buffer);
// relative_off[num_edges - 1] is the total number of removed edges.
num_edges = num_edges - relative_off[num_edges - 1];
free(relative_off);
log_info("New # of edges: %zu, Elapsed: %.9lfs", num_edges, timer.elapsed());
log_debug("max_node_id: %d", max_node_id);
return max_node_id;
}
// Accumulates per-vertex degree counts for the edges accepted by predicate f.
// Must be called from INSIDE an existing omp parallel region (it uses an
// orphaned `omp for` plus an explicit barrier). Each thread keeps a private
// 8-bit saturating counter per vertex and flushes it atomically to deg_lst
// whenever it reaches 0xff, reducing atomic-add traffic on hot vertices.
template<typename T, typename D, typename I, typename OFF, typename F>
void EdgeListHistogram(I num_vertices, OFF num_edges, pair<T, T> *edge_lst, D *deg_lst, F f) {
auto local_buf = (uint8_t *) calloc(num_vertices, sizeof(uint8_t));
#pragma omp for
for (size_t i = 0u; i < num_edges; i++) {
if (f(i)) {
auto src = edge_lst[i].first;
auto dst = edge_lst[i].second;
local_buf[src]++;
if (local_buf[src] == 0xff) {
// Counter saturated: flush the 255 accumulated increments atomically.
__sync_fetch_and_add(&deg_lst[src], 0xff);
local_buf[src] = 0;
}
local_buf[dst]++;
if (local_buf[dst] == 0xff) {
__sync_fetch_and_add(&deg_lst[dst], 0xff);
local_buf[dst] = 0;
}
}
}
// Flush each thread's remaining partial counts.
for (size_t i = 0u; i < num_vertices; i++) {
// atomic add for edge.first
if (local_buf[i] > 0)
__sync_fetch_and_add(&(deg_lst[i]), local_buf[i]);
}
free(local_buf);
#pragma omp barrier
}
// Convenience overload: build the degree histogram over ALL edges,
// i.e. with a predicate that accepts every edge index.
template<typename T, typename D, typename I, typename OFF>
void EdgeListHistogram(I num_vertices, OFF num_edges, pair<T, T> *edge_lst, D *deg_lst) {
    auto accept_all = [](size_t) { return true; };
    EdgeListHistogram(num_vertices, num_edges, edge_lst, deg_lst, accept_all);
}
// Converts an undirected edge list into CSR form (deg_lst, off, adj_lst).
// Allocates deg_lst and off; adj_lst is allocated only if passed in as null.
// Each undirected edge is scattered in BOTH directions (src->dst, dst->src).
template<typename T, typename OFF>
void ConvertEdgeListToCSR(OFF num_edges, pair<T, T> *edge_lst,
uint32_t num_vertices, uint32_t *&deg_lst, OFF *&off,
int32_t *&adj_lst, int max_omp_threads) {
Timer convert_timer;
deg_lst = (uint32_t *) malloc(sizeof(uint32_t) * (num_vertices + 1));
off = (OFF *) malloc(sizeof(OFF) * (num_vertices + 1));
auto cur_write_off = (OFF *) malloc(sizeof(OFF) * (num_vertices + 1));
vector<OFF> histogram;
#pragma omp parallel num_threads(max_omp_threads)
{
MemSetOMP(deg_lst, 0, num_vertices + 1);
MemSetOMP(off, 0, num_vertices + 1);
#pragma omp single
log_info("[%s]: InitTime: %.9lf s", __FUNCTION__, convert_timer.elapsed());
// Count per-vertex degrees (runs inside this parallel region).
EdgeListHistogram(num_vertices, num_edges, edge_lst, deg_lst);
#pragma omp single
log_info("[%s]: Histogram Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
// PrefixSum.
// off[v+1] = sum of degrees of vertices 0..v; off[0] stays 0.
InclusivePrefixSumOMP(histogram, off + 1, num_vertices, [&deg_lst](uint32_t it) {
return deg_lst[it];
});
// Per-vertex write cursors, advanced atomically during the scatter below.
MemCpyOMP(cur_write_off, off, num_vertices + 1);
// Scatter.
#pragma omp single
{
if (adj_lst == nullptr) {
log_info("Allocate Inside (adj_lst)...");
adj_lst = (int32_t *) malloc(sizeof(int32_t) * off[num_vertices]);
}
log_info("[%s]: PrefixSum Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
}
#pragma omp for
for (size_t i = 0; i < num_edges; i++) {
auto src = edge_lst[i].first;
auto dst = edge_lst[i].second;
// Atomic cursor bump reserves a unique slot in each adjacency row.
auto old_offset = __sync_fetch_and_add(&(cur_write_off[src]), 1);
adj_lst[old_offset] = dst;
old_offset = __sync_fetch_and_add(&(cur_write_off[dst]), 1);
adj_lst[old_offset] = src;
}
}
free(cur_write_off);
log_info("[%s]: Total Conversion Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
}
// Relabels the graph according to old_vid_dict (new id -> old id):
// builds the inverse mapping new_vid_dict, rewrites the CSR offsets and
// adjacency under the new ids, and sorts each adjacency row. On return
// g.adj points at the relabeled adjacency (swapped with new_adj).
inline void Reorder(graph_t &g, vector<int32_t> &new_vid_dict, vector<int32_t> &old_vid_dict, int32_t *&new_adj) {
Timer timer;
new_vid_dict = vector<int32_t>(g.n);
using row_ptr_t = uint32_t ;
vector<row_ptr_t> new_off(g.n + 1);
new_off[0] = 0;
auto max_omp_threads = omp_get_max_threads();
auto histogram = vector<row_ptr_t>((max_omp_threads + 1) * CACHE_LINE_ENTRY, 0);
#pragma omp parallel num_threads(max_omp_threads)
{
// 1st CSR: new_off, new_adj
// Invert the permutation: new_vid_dict[old id] = new id.
#pragma omp for
for (auto i = 0; i < g.n; i++) {
new_vid_dict[old_vid_dict[i]] = i;
}
// Prefix-sum the degrees in new-id order to get the new row offsets.
InclusivePrefixSumOMP(histogram, &new_off.front() + 1, g.n, [&g, &old_vid_dict](uint32_t new_id) {
auto vertex = old_vid_dict[new_id];
return g.num_edges[vertex + 1] - g.num_edges[vertex];
});
#pragma omp single
log_info("[%s]: Finish PrefixSum Time: %.9lf s", __FUNCTION__, timer.elapsed_and_reset());
// 2nd Parallel Transform
#pragma omp for schedule(dynamic, 100)
for (auto i = 0; i < g.n; i++) {
auto origin_i = old_vid_dict[i];
// transform
auto cur_idx = new_off[i];
for (auto my_old_off = g.num_edges[origin_i]; my_old_off < g.num_edges[origin_i + 1]; my_old_off++) {
new_adj[cur_idx] = new_vid_dict[g.adj[my_old_off]];
cur_idx++;
}
// sort the local ranges
sort(new_adj + new_off[i], new_adj + new_off[i + 1]);
}
// Publish the new offsets into the graph structure.
MemCpyOMP(g.num_edges, &new_off.front(), (g.n + 1));
}
swap(g.adj, new_adj);
log_info("[%s]: Finish Reorder Time: %.3lf s", __FUNCTION__, timer.elapsed());
}
// Relabels the graph so vertices are ordered by degree, highest first.
// Two strategies: a parallel bucket sort keyed by (max_deg - deg) when
// NO_ATOMIC is defined, otherwise ips4o parallel comparison sort.
// Finally delegates the actual CSR rewrite to Reorder().
inline void ReorderDegDescending(graph_t &g, vector<int32_t> &new_vid_dict, vector<int32_t> &old_vid_dict,
int32_t *&new_adj) {
Timer timer;
#ifdef NO_ATOMIC
#define USE_BUCKET_SORT
#endif
#ifdef USE_BUCKET_SORT
auto max_omp_threads = omp_get_max_threads();
auto max_deg = 0;
auto *old_vid_dict_buffer = (int32_t *) malloc(sizeof(int32_t) * g.n);
uint32_t *write_off = nullptr;
uint32_t *bucket_ptrs = nullptr;
auto histogram = vector<uint32_t>((max_omp_threads + 1) * CACHE_LINE_ENTRY, 0);
#pragma omp parallel num_threads(max_omp_threads)
{
#pragma omp for reduction(max: max_deg)
for (auto i = 0; i < g.n; i++) {
max_deg = max<int>(max_deg, g.num_edges[i + 1] - g.num_edges[i]);
}
#pragma omp single nowait
{
old_vid_dict = vector<int32_t>(g.n);
}
// Identity permutation as bucket-sort input.
#pragma omp for
for (auto i = 0u; i < g.n; i++) {
old_vid_dict_buffer[i] = i;
}
auto ptr = &old_vid_dict[0];
// Bucket key (max_deg - deg) yields ascending buckets = descending degree.
BucketSortSmallBuckets(histogram, old_vid_dict_buffer, ptr, write_off, bucket_ptrs,
g.n, max_deg + 1, [&g, old_vid_dict_buffer, max_deg](int i) {
auto u = old_vid_dict_buffer[i];
return max_deg - (g.num_edges[u + 1] - g.num_edges[u]);
});
}
free(write_off);
free(bucket_ptrs);
free(old_vid_dict_buffer);
#else
log_info("Use parallel sort (parasort)");
old_vid_dict = vector<int32_t>(g.n);
#pragma omp parallel for
for (auto i = 0; i < g.n; i++) {
old_vid_dict[i] = i;
}
log_info("Allocation time: %.9lf s", timer.elapsed());
// Comparison sort directly on degree, descending.
ips4o::parallel::sort(old_vid_dict.begin(), old_vid_dict.end(),
[&g](int l, int r) -> bool {
return g.num_edges[l + 1] - g.num_edges[l] > g.num_edges[r + 1] - g.num_edges[r];
});
#endif
log_info("Deg-descending time: %.9lf s", timer.elapsed());
Reorder(g, new_vid_dict, old_vid_dict, new_adj);
}
|
interpolate_structural_solution_for_dem_utility.h | /*
* Author: Salva Latorre and Ignasi Pouplana
*
* latorre@cimne.upc.edu
* ipouplana@cimne.upc.edu
*/
#ifndef INTERPOLATE_STRUCTURAL_SOLUTION_FOR_DEM_UTILITY_H
#define INTERPOLATE_STRUCTURAL_SOLUTION_FOR_DEM_UTILITY_H
#include "includes/variables.h"
#include <limits>
#include <iostream>
#include <iomanip>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "includes/define.h"
#include "includes/condition.h"
#include "includes/model_part.h"
#include "dem_structures_coupling_application_variables.h"
namespace Kratos {
/// Utility that lets a DEM solver (running with a smaller time step) use
/// linearly interpolated kinematics from an FEM structural solve:
/// Save* snapshots the converged FEM state, Interpolate* moves the nodes to
/// the DEM sub-step time, and Restore* puts the true FEM state back.
class InterpolateStructuralSolutionForDEM {
public:
typedef ModelPart::NodesContainerType::ContainerType::iterator NodesIteratorType;
KRATOS_CLASS_POINTER_DEFINITION(InterpolateStructuralSolutionForDEM);
InterpolateStructuralSolutionForDEM() {}
virtual ~InterpolateStructuralSolutionForDEM() {}
/// Snapshots each node's VELOCITY and DISPLACEMENT into the
/// CURRENT_STRUCTURAL_* variables so they survive the interpolation below.
void SaveStructuralSolution(ModelPart& r_structural_model_part) {
KRATOS_TRY
const int NNodes = static_cast<int>(r_structural_model_part.Nodes().size());
ModelPart::NodesContainerType::iterator node_begin = r_structural_model_part.NodesBegin();
#pragma omp parallel for
for (int i = 0; i < NNodes; i++) {
ModelPart::NodesContainerType::iterator itNode = node_begin + i;
array_1d<double,3>& r_current_velocity = itNode->FastGetSolutionStepValue(CURRENT_STRUCTURAL_VELOCITY);
noalias(r_current_velocity) = itNode->FastGetSolutionStepValue(VELOCITY);
array_1d<double,3>& r_current_displacement = itNode->FastGetSolutionStepValue(CURRENT_STRUCTURAL_DISPLACEMENT);
noalias(r_current_displacement) = itNode->FastGetSolutionStepValue(DISPLACEMENT);
}
KRATOS_CATCH("")
}
/// Linearly interpolates coordinates, velocity, displacement and
/// DELTA_DISPLACEMENT between the previous FEM step (step index 1) and the
/// saved CURRENT_STRUCTURAL_* values, evaluated at the DEM time dem_time.
void InterpolateStructuralSolution(ModelPart& r_structural_model_part, const double fem_delta_time, const double fem_time, const double dem_delta_time, const double dem_time) {
KRATOS_TRY
const double previous_fem_time = fem_time - fem_delta_time;
// Fraction of the FEM step elapsed at the current / previous DEM sub-step.
const double time_factor = (dem_time - previous_fem_time) / fem_delta_time;
const double previous_time_factor = (dem_time - dem_delta_time - previous_fem_time) / fem_delta_time;
const int NNodes = static_cast<int>(r_structural_model_part.Nodes().size());
ModelPart::NodesContainerType::iterator node_begin = r_structural_model_part.NodesBegin();
#pragma omp parallel for
for (int i = 0; i < NNodes; i++) {
ModelPart::NodesContainerType::iterator it_node = node_begin + i;
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates()
+ it_node->FastGetSolutionStepValue(DISPLACEMENT,1)
+ (it_node->FastGetSolutionStepValue(CURRENT_STRUCTURAL_DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT,1)) * time_factor;
array_1d<double,3>& r_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
const array_1d<double,3>& previous_velocity = it_node->FastGetSolutionStepValue(VELOCITY,1);
noalias(r_velocity) = previous_velocity + (it_node->FastGetSolutionStepValue(CURRENT_STRUCTURAL_VELOCITY) - previous_velocity) * time_factor;
array_1d<double,3>& r_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT);
noalias(r_displacement) = it_node->Coordinates() - it_node->GetInitialPosition().Coordinates();
// Recompute where the node was at the previous DEM sub-step to obtain
// the incremental displacement over this sub-step.
array_1d<double, 3> previous_coordinates;
noalias(previous_coordinates) = it_node->GetInitialPosition().Coordinates()
+ it_node->FastGetSolutionStepValue(DISPLACEMENT,1)
+ (it_node->FastGetSolutionStepValue(CURRENT_STRUCTURAL_DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT,1)) * previous_time_factor;
array_1d<double,3>& delta_displacement = it_node->FastGetSolutionStepValue(DELTA_DISPLACEMENT);
noalias(delta_displacement) = it_node->Coordinates() - previous_coordinates;
}
KRATOS_CATCH("")
}
/// Restores the snapshot taken by SaveStructuralSolution: coordinates,
/// VELOCITY and DISPLACEMENT are reset to the converged FEM values.
void RestoreStructuralSolution(ModelPart& r_structural_model_part) {
KRATOS_TRY
const int NNodes = static_cast<int>(r_structural_model_part.Nodes().size());
ModelPart::NodesContainerType::iterator node_begin = r_structural_model_part.NodesBegin();
#pragma omp parallel for
for (int i = 0; i < NNodes; i++) {
ModelPart::NodesContainerType::iterator it_node = node_begin + i;
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates() + it_node->FastGetSolutionStepValue(DISPLACEMENT);
array_1d<double,3>& r_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
noalias(r_velocity) = it_node->FastGetSolutionStepValue(CURRENT_STRUCTURAL_VELOCITY);
array_1d<double,3>& r_displacement = it_node->FastGetSolutionStepValue(DISPLACEMENT);
noalias(r_displacement) = it_node->FastGetSolutionStepValue(CURRENT_STRUCTURAL_DISPLACEMENT);
}
KRATOS_CATCH("")
}
virtual std::string Info() const { return "";}
virtual void PrintInfo(std::ostream& rOStream) const {}
virtual void PrintData(std::ostream& rOStream) const {}
private:
InterpolateStructuralSolutionForDEM& operator= (InterpolateStructuralSolutionForDEM const& rOther);
}; // class InterpolateStructuralSolutionForDEM
} // namespace Kratos
#endif // INTERPOLATE_STRUCTURAL_SOLUTION_FOR_DEM_UTILITY_H
|
pxgstrf_synch.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*
* -- SuperLU MT routine (version 2.1) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* August 15, 1997
*
* Modified: March 20, 2013 version 2.1
*/
#include <stdio.h>
#include <math.h>
#include "slu_mt_ddefs.h"
#define SPLIT_TOP
/*
 * Sets up the shared state for the parallel LU factorization scheduler:
 * platform-specific mutexes, spin locks, per-panel status records, the
 * task queue, and the partition of columns into relaxed supernodes and
 * (possibly split) regular panels. Returns 0 on success.
 */
int_t
ParallelInit(int_t n, pxgstrf_relax_t *pxgstrf_relax,
superlumt_options_t *superlumt_options,
pxgstrf_shared_t *pxgstrf_shared)
{
int_t *etree = superlumt_options->etree;
register int_t w, dad, ukids, i, j, k, rs, panel_size, relax;
register int_t P, w_top, do_split = 0;
panel_t panel_type;
int_t *panel_histo = pxgstrf_shared->Gstat->panel_histo;
register int_t nthr, concurrency, info;
Gstat_t *Gstat = pxgstrf_shared->Gstat;
#if ( MACH==SUN )
register int_t sync_type = USYNC_THREAD;
/* Set concurrency level. */
nthr = sysconf(_SC_NPROCESSORS_ONLN);
thr_setconcurrency(nthr); /* number of LWPs */
concurrency = thr_getconcurrency();
#if ( PRNTlevel==1 )
/* NOTE(review): P is not initialized until later in this function
   (P = 12 below), so the value printed here is indeterminate. */
printf(".. CPUs " IFMT ", concurrency (#LWP) " IFMT ", P " IFMT "\n",
nthr, concurrency, P);
#endif
/* Initialize mutex variables. */
pxgstrf_shared->lu_locks = (mutex_t *)
SUPERLU_MALLOC(NO_GLU_LOCKS * sizeof(mutex_t));
for (i = 0; i < NO_GLU_LOCKS; ++i)
mutex_init(&pxgstrf_shared->lu_locks[i], sync_type, 0);
#elif ( MACH==DEC || MACH==PTHREAD )
pxgstrf_shared->lu_locks = (pthread_mutex_t *)
SUPERLU_MALLOC(NO_GLU_LOCKS * sizeof(pthread_mutex_t));
for (i = 0; i < NO_GLU_LOCKS; ++i)
pthread_mutex_init(&pxgstrf_shared->lu_locks[i], NULL);
#else
/* Other platforms: locks are allocated but need no explicit init. */
pxgstrf_shared->lu_locks =
(mutex_t *) SUPERLU_MALLOC( NO_GLU_LOCKS * sizeof(mutex_t) );
#endif
#if ( PRNTlevel==1 )
printf(".. ParallelInit() ... nprocs %2d\n", superlumt_options->nprocs);
#endif
pxgstrf_shared->spin_locks = intCalloc(n);
pxgstrf_shared->pan_status =
(pan_status_t *) SUPERLU_MALLOC((n+1)*sizeof(pan_status_t));
pxgstrf_shared->fb_cols = intMalloc(n+1);
panel_size = superlumt_options->panel_size;
relax = superlumt_options->relax;
w = SUPERLU_MAX(panel_size, relax) + 1;
for (i = 0; i < w; ++i) panel_histo[i] = 0;
pxgstrf_shared->num_splits = 0;
if ( (info = queue_init(&pxgstrf_shared->taskq, n)) ) {
fprintf(stderr, "ParallelInit(): " IFMT "\n", info);
SUPERLU_ABORT("queue_init fails.");
}
/* Count children of each node in the etree. */
for (i = 0; i <= n; ++i) pxgstrf_shared->pan_status[i].ukids = 0;
for (i = 0; i < n; ++i) {
dad = etree[i];
++pxgstrf_shared->pan_status[dad].ukids;
}
/* Find the panel partitions and initialize each panel's status */
#ifdef PROFILE
Gstat->num_panels = 0;
#endif
pxgstrf_shared->tasks_remain = 0;
rs = 1; /* index for the next relaxed s-node */
/* Panels near the top of the etree are halved to expose parallelism. */
w_top = panel_size/2;
if ( w_top == 0 ) w_top = 1;
P = 12; /* heuristic: start splitting in the last panel_size*P columns */
for (i = 0; i < n; ) {
if ( pxgstrf_relax[rs].fcol == i ) {
/* Column i starts a relaxed supernode: it is schedulable at once. */
w = pxgstrf_relax[rs++].size;
panel_type = RELAXED_SNODE;
pxgstrf_shared->pan_status[i].state = CANGO;
} else {
/* Adjust panel_size so that a panel won't overlap with
the next relaxed snode. */
#if 0
/* Only works when etree is postordered. */
w = SUPERLU_MIN(panel_size, pxgstrf_relax[rs].fcol - i);
#else
w = panel_size;
for (k = i + 1; k < SUPERLU_MIN(i + panel_size, n); ++k)
if ( k == pxgstrf_relax[rs].fcol ) {
w = k - i; /* panel stops at column k-1 */
break;
}
if ( k == n ) w = n - i;
#endif
#ifdef SPLIT_TOP
if ( !do_split ) {
if ( (n-i) < panel_size * P ) do_split = 1;
}
if ( do_split && w > w_top ) { /* split large panel */
w = w_top;
++pxgstrf_shared->num_splits;
}
#endif
for (j = i+1; j < i + w; ++j)
/* Do not allow panel to cross a branch point in the etree. */
if ( pxgstrf_shared->pan_status[j].ukids > 1 ) break;
w = j - i; /* j should start a new panel */
panel_type = REGULAR_PANEL;
pxgstrf_shared->pan_status[i].state = UNREADY;
#ifdef DOMAINS
if ( in_domain[i] == TREE_DOMAIN ) panel_type = TREE_DOMAIN;
#endif
}
if ( panel_type == REGULAR_PANEL ) {
++pxgstrf_shared->tasks_remain;
/*printf("nondomain panel %6d -- %6d\n", i, i+w-1);
fflush(stdout);*/
}
/* Interior columns of a panel get size 0,-1,-2,...; the leader gets w. */
ukids = k = 0;
for (j = i; j < i + w; ++j) {
pxgstrf_shared->pan_status[j].size = k--;
pxgstrf_shared->pan_status[j].type = panel_type;
ukids += pxgstrf_shared->pan_status[j].ukids;
}
pxgstrf_shared->pan_status[i].size = w; /* leading column */
/* only count those kids outside the panel */
pxgstrf_shared->pan_status[i].ukids = ukids - (w-1);
panel_histo[w]++;
#ifdef PROFILE
Gstat->panstat[i].size = w;
++Gstat->num_panels;
#endif
pxgstrf_shared->fb_cols[i] = i;
i += w; /* move to the next panel */
} /* for i ... */
/* Dummy root */
pxgstrf_shared->pan_status[n].size = 1;
pxgstrf_shared->pan_status[n].state = UNREADY;
#if ( PRNTlevel==1 )
printf(".. Split: P " IFMT ", #nondomain panels " IFMT "\n", P, pxgstrf_shared->tasks_remain);
#endif
#ifdef DOMAINS
EnqueueDomains(&pxgstrf_shared->taskq, list_head, pxgstrf_shared);
#else
EnqueueRelaxSnode(&pxgstrf_shared->taskq, n, pxgstrf_relax, pxgstrf_shared);
#endif
#if ( PRNTlevel==1 )
printf(".. # tasks " IFMT "\n", pxgstrf_shared->tasks_remain);
fflush(stdout);
#endif
#ifdef PREDICT_OPT
/* Set up structure describing children */
for (i = 0; i <= n; cp_firstkid[i++] = EMPTY);
for (i = n-1; i >= 0; i--) {
dad = etree[i];
cp_nextkid[i] = cp_firstkid[dad];
cp_firstkid[dad] = i;
}
#endif
return 0;
} /* ParallelInit */
/*
* Free the storage used by the parallel scheduling algorithm.
*/
/*
 * Releases everything allocated by ParallelInit (locks, panel status,
 * spin locks, task queue) plus the Glu->map_in_sup array. Returns 0.
 */
int_t ParallelFinalize(pxgstrf_shared_t *pxgstrf_shared)
{
/* Destroy mutexes */
#if ( MACH==SUN )
register int_t i;
for (i = 0; i < NO_GLU_LOCKS; ++i)
mutex_destroy( &pxgstrf_shared->lu_locks[i] );
#elif ( MACH==DEC || MACH==PTHREAD )
register int_t i;
for (i = 0; i < NO_GLU_LOCKS; ++i)
pthread_mutex_destroy( &pxgstrf_shared->lu_locks[i] );
#endif
SUPERLU_FREE ((void*)pxgstrf_shared->lu_locks);
SUPERLU_FREE ((int_t*)pxgstrf_shared->spin_locks);
SUPERLU_FREE (pxgstrf_shared->pan_status);
SUPERLU_FREE (pxgstrf_shared->fb_cols);
SUPERLU_FREE (pxgstrf_shared->Glu->map_in_sup);
queue_destroy(&pxgstrf_shared->taskq);
#if ( PRNTlevel==1 )
printf(".. # panel splittings " IFMT "\n", pxgstrf_shared->num_splits);
#endif
return 0;
}
/*
 * Initializes an empty task queue with storage for n items.
 * Returns 0 on success, -1 on an invalid size or allocation failure
 * (callers such as ParallelInit() treat any nonzero return as fatal).
 */
int_t queue_init(queue_t *q, int_t n)
{
    if ( n < 1 ) return (-1);
    q->queue = (qitem_t *) SUPERLU_MALLOC(n*sizeof(qitem_t));
    if ( !q->queue ) return (-1); /* allocation failed */
    q->count = 0;
    q->head = 0;
    q->tail = 0;
    return 0;
}
/*
 * Releases the queue's storage. The pointer is cleared afterwards so a
 * second destroy (or a stale use) does not act on a dangling pointer.
 * Returns 0.
 */
int_t queue_destroy(queue_t *q)
{
    if ( q->queue ) {
        SUPERLU_FREE( q->queue );
        q->queue = NULL;
    }
    return 0;
}
/*
* Return value: number of items in the queue
*/
/*
 * Appends item at the tail. No capacity check is performed: the caller
 * must guarantee that at most n items (from queue_init) are ever enqueued.
 * Returns the number of items now in the queue.
 */
int_t Enqueue(queue_t *q, qitem_t item)
{
q->queue[q->tail++] = item;
++q->count;
return (q->count);
}
/*
* Return value: >= 0 number of items in the queue
* = -1 queue is empty
*/
/*
 * Removes the item at the head into *item.
 * Returns the number of items remaining, or EMPTY if the queue was empty
 * (in which case *item is left untouched).
 */
int_t Dequeue(queue_t *q, qitem_t *item)
{
if ( q->count <= 0 ) return EMPTY;
*item = q->queue[q->head++];
--q->count;
return (q->count);
}
/*
 * Debug helper: prints the current count and every pending item between
 * head and tail. Returns 0.
 */
int_t QueryQueue(queue_t *q)
{
register int_t i;
printf("Queue count: " IFMT "\n", q->count);
for (i = q->head; i < q->tail; ++i) printf(IFMT "\titem " IFMT "\n", i, q->queue[i]);
return 0;
}
/*
 * Seeds the task queue with the first column of every relaxed supernode
 * and bumps tasks_remain for each. pxgstrf_relax[0].size holds the number
 * of relaxed snodes; the entries themselves start at index 1. Returns 0.
 */
int_t EnqueueRelaxSnode(queue_t *q, int_t n, pxgstrf_relax_t *pxgstrf_relax,
pxgstrf_shared_t *pxgstrf_shared)
{
register int_t rs, j, m;
m = pxgstrf_relax[0].size;
for (rs = 1; rs <= m; ++rs) {
j = pxgstrf_relax[rs].fcol;
q->queue[q->tail++] = j;
q->count++;
++pxgstrf_shared->tasks_remain;
}
#if ( PRNTlevel==1 )
printf(".. EnqueueRelaxSnode(): count " IFMT "\n", q->count);
#endif
return 0;
}
/*
* Enqueue the initial independent domains.
* A pair of two numbers {root, fst_desc} is added in the queue.
*/
/*int EnqueueDomains(int P, queue_t *q, struct Branch **proc_domains_h)*/
int_t EnqueueDomains(queue_t *q, struct Branch *list_head,
pxgstrf_shared_t *pxgstrf_shared)
{
struct Branch *b, *thrash;
/* for (pnum = 0; pnum < P; ++pnum) {
for (b = proc_domains_h[pnum]; b != NULL; ) {*/
/* Walk the branch list: enqueue each domain as a {root, first_desc} pair,
   mark its root schedulable, and free the list node as we go (the list
   is consumed by this call). */
b = list_head;
while ( b ) {
thrash = b;
q->queue[q->tail++] = b->root;
q->queue[q->tail++] = b->first_desc;
q->count = q->count + 2;
STATE ( b->root ) = CANGO;
++pxgstrf_shared->tasks_remain;
b = b->next;
SUPERLU_FREE (thrash);
}
printf("EnqueueDomains(): count " IFMT "\n", q->count);
return 0;
}
/*
 * Atomically increments *data (the global supernode counter) under the
 * NSUPER_LOCK, using the synchronization primitive appropriate for the
 * target platform, and returns the new value. Under PROFILE the time
 * spent in the critical section is charged to processor pnum.
 */
int_t NewNsuper(const int_t pnum, pxgstrf_shared_t *pxgstrf_shared, int_t *data)
{
register int_t i;
mutex_t *lock = &pxgstrf_shared->lu_locks[NSUPER_LOCK];
Gstat_t *Gstat = pxgstrf_shared->Gstat;
#ifdef PROFILE
double t = SuperLU_timer_();
#endif
#if ( MACH==SUN )
mutex_lock(lock);
#elif ( MACH==DEC || MACH==PTHREAD )
pthread_mutex_lock(lock);
#elif ( MACH==SGI || MACH==ORIGIN )
#pragma critical lock(lock)
#elif ( MACH==CRAY_PVP )
#pragma _CRI guard (*lock)
#elif ( MACH==OPENMP )
#pragma omp critical ( NSUPER_LOCK )
#endif
{
i = ++(*data);
}
#if ( MACH==SUN )
mutex_unlock(lock);
#elif ( MACH==DEC || MACH==PTHREAD )
pthread_mutex_unlock(lock);
#elif ( MACH==CRAY_PVP )
#pragma _CRI endguard (*lock)
#endif
#ifdef PROFILE
Gstat->procstat[pnum].cs_time += SuperLU_timer_() - t;
#endif
return i;
}
/*
 * Busy-waits until *block is 0, then sets it to 1. Returns 0.
 * NOTE(review): the flag is a plain int_t with no atomic/volatile
 * qualification -- under modern compilers/memory models this spin-wait is
 * a data race and the load may be hoisted out of the loop; it relies on
 * legacy platform semantics.
 */
int_t lockon(int_t *block)
{
while ( *block ) ; /* spin-wait */
*block = 1;
return 0;
}
/* Releases a spin lock acquired by lockon() by clearing the flag. Returns 0. */
int_t lockoff(int_t *block)
{
*block = 0;
return 0;
}
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number it
% represents, at most, the number of colors desired in the output image.
% Assignment defines the output image's color map and sets each pixel's
% color by re-classification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
/*
  Pixel channels kept in double precision.  Used wherever Quantum precision
  would lose information: per-node color sums, cube midpoints, and the
  dither error queue.
*/
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;
/*
  One node of the color-description tree.  Each node represents a cube in
  color space; child[] holds up to 8 subcubes, or 16 when alpha is
  associated (see ColorToNodeId()).
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];

  MagickSizeType
    number_unique;   /* pixels classified at exactly this node (the "n2"
                        count in the algorithm description above) */

  DoublePixelPacket
    total_color;     /* per-channel sums of those pixels' colors */

  double
    quantize_error;  /* accumulated quantization error for this cube */

  size_t
    color_number,    /* colormap index assigned in DefineImageColormap() */
    id,              /* which child slot of the parent this node occupies */
    level;           /* depth of this node in the tree */
} NodeInfo;
/*
  A batch of NodeInfo structures.  Batches are chained into a list,
  presumably so nodes can be allocated NodesInAList at a time and released
  together -- the allocation code (GetNodeInfo()) is outside this view.
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;
/*
  Working state for color quantization: the color-description tree plus the
  search, pruning, and dithering bookkeeping shared by the classification,
  reduction, and assignment phases.  Members marked "presumably" are
  inferred; their use lives outside this view.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;               /* root of the color description tree */

  size_t
    colors,              /* leaf colors currently represented in the tree */
    maximum_colors;      /* requested number of output colors */

  ssize_t
    transparent_index;   /* colormap slot of the most-transparent color */

  MagickSizeType
    transparent_pixels;  /* population of that colormap entry */

  DoublePixelPacket
    target;              /* color being matched by ClosestColor() */

  double
    distance,            /* best squared distance found so far */
    pruning_threshold,   /* Ep: prune nodes with error <= this */
    next_threshold;      /* minimum error among surviving nodes */

  size_t
    nodes,               /* total nodes allocated in the tree */
    free_nodes,          /* presumably unused slots in the current batch */
    color_number;        /* colormap index of the closest color found */

  NodeInfo
    *next_node;          /* presumably next free slot in current batch */

  Nodes
    *node_queue;         /* chain of node allocation batches */

  MemoryInfo
    *memory_info;        /* backing allocation for cache -- TODO confirm */

  ssize_t
    *cache;              /* presumably memoized color lookups (CacheShift) */

  DoublePixelPacket
    error[ErrorQueueLength];    /* recent dither error terms */

  double
    weights[ErrorQueueLength];  /* weights applied to those error terms */

  QuantizeInfo
    *quantize_info;      /* caller's quantization settings */

  MagickBooleanType
    associate_alpha;     /* include alpha in classification and search? */

  ssize_t
    x,
    y;                   /* presumably current position while dithering */

  size_t
    depth;               /* maximum tree depth currently in use */

  MagickOffsetType
    offset;              /* presumably progress counter */

  MagickSizeType
    span;                /* presumably total work for progress reporting */
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *dither_option;

  QuantizeInfo
    *quantize_info;

  /*
    Allocate the structure and fill it with library defaults.
  */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Inherit dithering and error-measurement settings from the image info;
    an explicit "dither" image option overrides the boolean dither flag.
  */
  if (image_info->dither == MagickFalse)
    quantize_info->dither_method=NoDitherMethod;
  else
    quantize_info->dither_method=RiemersmaDitherMethod;
  dither_option=GetImageOption(image_info,"dither");
  if (dither_option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,dither_option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    scale;

  /*
    Opaque pixels (or cubes that do not associate alpha) are copied with
    their channels unscaled; otherwise each color channel is premultiplied
    by the normalized alpha value.  The alpha channel itself is never
    scaled.
  */
  scale=1.0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (GetPixelAlpha(image,pixel) != OpaqueAlpha))
    scale=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=scale*GetPixelRed(image,pixel);
  alpha_pixel->green=scale*GetPixelGreen(image,pixel);
  alpha_pixel->blue=scale*GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    scale;

  /*
    PixelInfo variant of AssociateAlphaPixel(): premultiply the color
    channels by normalized alpha unless the pixel is opaque or the cube
    does not associate alpha.
  */
  scale=1.0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->alpha != OpaqueAlpha))
    scale=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=scale*pixel->red;
  alpha_pixel->green=scale*pixel->green;
  alpha_pixel->blue=scale*pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  /*
    Extract bit `index` of each channel and pack the bits into a child id:
    red -> bit 0, green -> bit 1, blue -> bit 2, and, when alpha is
    associated, alpha -> bit 3.
  */
  id=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01);
  id|=((size_t) ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) &
    0x01)) << 1;
  id|=((size_t) ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) &
    0x01)) << 2;
  if (cube_info->associate_alpha != MagickFalse)
    id|=((size_t) ((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) &
      0x01)) << 3;
  return(id);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag  "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Reset the count; DefineImageColormap() repopulates the colormap and
    advances image->colors as it walks the pruned tree.
  */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /*
          Each thread works on a private copy of the cube so the
          closest-color search state (target, distance, color_number) is
          not shared across rows.
        */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /*
            Run-length scan: count how many subsequent pixels on this row
            are identical so they can all be assigned the same colormap
            entry in one pass.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          /* Descend from the root, one channel bit per level, until the
             pruned tree runs out of children for this color. */
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Seed the search with a distance larger than any possible
             squared RGBA distance. */
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: snap the two colormap entries to pure black and
        white, preserving which entry is the brighter one.
      */
      intensity=0.0;
      if ((image->colors > 1) &&
          (GetPixelInfoLuma(image->colormap+0) >
           GetPixelInfoLuma(image->colormap+1)))
        intensity=(double) QuantumRange;
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  /* Restore the caller's colorspace if quantization transformed it. */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax.  If color components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color.  It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  /*
    Associate alpha only when the image blends its alpha channel, and never
    for two-color grayscale quantization (monochrome output).
  */
  associate_alpha=MagickFalse;
  if (image->alpha_trait == BlendPixelTrait)
    associate_alpha=MagickTrue;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace,exception);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  /* error.alpha stays 0 when alpha is not associated. */
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      /* Run-length scan identical pixels so one descent covers them all. */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /* Track the center of the cube this level descends into. */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        /* Consistency fix: match the explicit MagickFalse comparison used
           by the second classification loop below. */
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    /*
      Once more colors than requested have been seen, prune the tree to the
      working depth and fall through to the depth-limited loop below.
    */
    if (cube_info->colors > cube_info->maximum_colors)
      {
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify the remaining rows, but only to the (possibly pruned) working
    depth cube_info->depth instead of the full MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                /* Consistency fix: use the same `%s' quoting as the first
                   classification loop's identical exception. */
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Start from a default-initialized structure; when no source is given,
    the defaults are the result.
  */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  double
    alpha,
    beta,
    delta,
    metric;

  register ssize_t
    i;

  register DoublePixelPacket
    *magick_restrict q;

  register PixelInfo
    *magick_restrict p;

  size_t
    number_children;

  /*
    Recurse into every populated child first.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique == 0)
    return;
  /*
    Determine if this node's colormap entry is "closest" to the target.
    The squared distance accumulates channel by channel; as soon as it
    exceeds the best distance found so far, the entry cannot win and we
    bail out.
  */
  p=image->colormap+node_info->color_number;
  q=(&cube_info->target);
  alpha=1.0;
  beta=1.0;
  if (cube_info->associate_alpha != MagickFalse)
    {
      alpha=(double) (QuantumScale*p->alpha);
      beta=(double) (QuantumScale*q->alpha);
    }
  delta=alpha*p->red-beta*q->red;
  metric=delta*delta;
  if (metric > cube_info->distance)
    return;
  delta=alpha*p->green-beta*q->green;
  metric+=delta*delta;
  if (metric > cube_info->distance)
    return;
  delta=alpha*p->blue-beta*q->blue;
  metric+=delta*delta;
  if (metric > cube_info->distance)
    return;
  if (cube_info->associate_alpha != MagickFalse)
    {
      delta=p->alpha-q->alpha;
      metric+=delta*delta;
    }
  if (metric <= cube_info->distance)
    {
      cube_info->distance=metric;
      cube_info->color_number=node_info->color_number;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    palette_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only palette images carry a colormap worth compressing.
  */
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize to the image's current color count at full tree depth;
    this drops duplicate and unused colormap entries.
  */
  GetQuantizeInfo(&palette_info);
  palette_info.number_colors=image->colors;
  palette_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&palette_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry.  A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.  DefineImageColormap() returns the number of
% colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children (8 per node without alpha, 16 with alpha).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color / number_unique.  PerceptibleReciprocal() guards the
        division.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          /*
            Mean alpha first; if the entry is translucent the color
            channels are rescaled by 1/mean-alpha (gamma) below.
          */
          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /*
                Track the most-populated translucent entry as the image's
                transparent index.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      /* Assign this node the next colormap slot and grow the colormap. */
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *next;

  /*
    Release color cube tree storage: free each queued slab of nodes, then
    the dither cache (if one was allocated), the cloned quantize info, and
    finally the cube structure itself.
  */
  do
  {
    next=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=next;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Invalidate the signature before releasing the structure so a dangling
    pointer trips the signature assert instead of silently reading freed
    memory.  Always returns NULL.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  quantize_info->signature=(~MagickCoreSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  ssize_t
    n;

  /*
    Release the per-thread error-diffusion scratch buffers, then the table
    of buffer pointers itself.  Always returns NULL.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixels[n] != (DoublePixelPacket *) NULL)
      pixels[n]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[n]);
  return((DoublePixelPacket **) RelinquishMagickMemory(pixels));
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
/*
  Map a pixel to its slot in the closest-color cache: each channel is
  scaled to 8 bits, its high (8-CacheShift) bits kept, and packed into a
  disjoint bit field of the index (red lowest, then green, blue, and —
  when alpha is associated — alpha highest).
*/
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using serpentine-scan Floyd-Steinberg
    error diffusion.  Returns MagickFalse on pixel-cache failure or when
    the progress monitor requests termination.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    Quantum
      *magick_restrict q;

    size_t
      index;

    ssize_t
      v,
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    cube=(*cube_info);
    /*
      The thread-local buffer holds two rows of error terms: "current" for
      this row, "previous" for the row above; the roles alternate each row.
      v is the scan direction: odd rows run right-to-left (serpentine).
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      ssize_t
        i,
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /*
        Diffuse error with the classic 7/16, 5/16, 3/16, 1/16 weights,
        scaled by the user-selectable diffusion amount.
      */
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*amount*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*amount*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            node_id;

          /*
            Cache miss: identify the deepest node containing the pixel's
            color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the residual error for diffusion into later pixels.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /*
    BUG FIX: previously returned MagickTrue unconditionally, discarding
    any failure recorded in status above.
  */
  return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
ExceptionInfo *);
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  /*
    Recursively trace a Hilbert curve of order 'level' over the image,
    dithering one pixel per RiemersmaDither() call.  'direction' encodes
    the orientation of the current curve segment via the gravity constants.
    The exact order of the calls below defines the curve; do not reorder.
  */
  if (level == 1)
    /*
      Base case: emit the three unit moves of this curve cell.
    */
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    /*
      Recursive case: visit the four sub-quadrants (suitably rotated),
      stitched together with three unit moves.
    */
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  /*
    Dither the single pixel at the curve's current position (p->x, p->y)
    — if it lies inside the image — then step the position one unit in
    'direction'.  Positions outside the image are skipped but still
    stepped, so the curve can overhang the image bounds.
  */
  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error: add the weighted contents of the error queue to
        the pixel's channels.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Cache miss: identify the deepest node containing the pixel's
            color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue (shift
        the queue left by one, append the new residual).
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /*
    Step the curve position; ForgetGravity (and any unknown direction)
    leaves the position unchanged.
  */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  size_t
    depth;

  ssize_t
    extent;

  /*
    Fall back to Floyd-Steinberg unless Riemersma dithering was requested.
  */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve: choose the
    smallest curve order whose grid covers the larger image dimension,
    then walk the curve starting at the origin.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  extent=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  while (extent != 0)
  {
    extent>>=1;
    depth++;
  }
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /*
    Flush the error queue at the final position (ForgetGravity: no step).
  */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initialize the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.  Depth is clamped to
    [2, MaxTreeDepth].
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node; the root is its own parent so PruneChild()-style
    parent walks never dereference NULL.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info leaks here — confirm */
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources: one cache slot per possible CacheOffset()
    index (4 channels, 8-CacheShift bits each), all marked empty (-1).
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info leaks here too — confirm */
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache.
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay: earlier queue
    entries (older errors) get smaller weights.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors so they sum to exactly 1.0 (any
    floating-point residue is folded into the first weight).
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  /*
    Hand out the next node from the current slab, allocating a fresh slab
    of NodesInAList nodes when the current one is exhausted.  Returns NULL
    on allocation failure.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            BUG FIX: release the queue header too; it was previously
            leaked on this out-of-memory path (it is not yet linked into
            cube_info->node_queue, so DestroyCubeInfo() cannot free it).
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    A DirectClass image has no colormap, so there is no quantization error
    to measure.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* area counts channel samples: 3 channels per pixel. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Compare each pixel to its colormap entry.  When the image blends
        alpha, both sides are weighted by their respective alpha before
        taking the channel difference.
      */
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait == BlendPixelTrait)
        {
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Zero the structure, then install the documented defaults: 256 colors,
    Riemersma dithering, undefined colorspace, and no error measurement.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->signature=MagickCoreSignature;
  quantize_info->measure_error=MagickFalse;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->number_colors=256;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
/*
  Snap a channel value to the nearest of 'levels' evenly spaced levels
  across the quantum range; MagickMax guards against levels <= 1.
*/
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Posterize colormap (PseudoClass images only); only channels whose
    traits request updates are touched.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* atomic increment; the subsequent read is approximate, which is
           acceptable for progress reporting */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Re-quantize to at most levels^3 colors using the requested dither.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneSubtree method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.  The root node has no parent;
    guard against a NULL dereference when the pruning threshold reaches
    the root (Reduce() invokes PruneChild() on cube_info->root whenever
    the root's quantize_error falls at or below the threshold).
  */
  parent=node_info->parent;
  if (parent != (NodeInfo *) NULL)
    {
      parent->number_unique+=node_info->number_unique;
      parent->total_color.red+=node_info->total_color.red;
      parent->total_color.green+=node_info->total_color.green;
      parent->total_color.blue+=node_info->total_color.blue;
      parent->total_color.alpha+=node_info->total_color.alpha;
      parent->child[node_info->id]=(NodeInfo *) NULL;
      cube_info->nodes--;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    children;

  /*
    Recursively visit every child subtree first, then merge this node into
    its parent if it sits at the bottom level of the color description tree.
  */
  children=cube_info->associate_alpha != MagickFalse ? 16UL : 8UL;
  for (id=0; id < (ssize_t) children; id++)
  {
    if (node_info->child[id] == (NodeInfo *) NULL)
      continue;
    PruneLevel(cube_info,node_info->child[id]);
  }
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    children;

  /*
    Depth-first descent:  prune away every node that lies deeper than the
    configured cube depth, merging its statistics upward via PruneChild().
  */
  children=cube_info->associate_alpha != MagickFalse ? 16UL : 8UL;
  for (id=0; id < (ssize_t) children; id++)
  {
    if (node_info->child[id] == (NodeInfo *) NULL)
      continue;
    PruneToCubeDepth(cube_info,node_info->child[id]);
  }
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the requested color count to [1,MaxColormapSize]; a request of 0
    means "use the maximum".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    Opaque grayscale images take a fast path that builds a PseudoClass
    colormap directly from pixel intensities.
  */
  if (image->alpha_trait != BlendPixelTrait)
    {
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  /*
    Already PseudoClass with few enough colors and no dithering requested:
    nothing to quantize, only an optional colorspace transform.
  */
  if ((quantize_info->dither_method == NoDitherMethod) &&
      (image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    {
      if ((quantize_info->colorspace != UndefinedColorspace) &&
          (quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace(image,quantize_info->colorspace,
          exception);
      return(MagickTrue);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.  Dithering and an
        alpha channel each shave one level (less precision needed); a
        grayscale image uses the full tree depth.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image if it contains more than the
        maximum, otherwise we can disable dithering to improve the performance.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      else
        cube_info->quantize_info->dither_method=NoDitherMethod;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the requested color count to [1,MaxColormapSize]; 0 means "use
    the maximum".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Pass 1:  classify the colors of every image in the sequence into one
    shared color cube.  The per-image progress monitor is suspended so
    only the sequence-level progress is reported.
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence, then (pass 2)
        assign the resulting shared colormap to every image.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  register ssize_t
    i;

  size_t
    count,
    number_children;

  /*
    Depth-first traversal:  write this node's quantization error at
    `offset`, then append each child subtree immediately after it.
    Returns the number of entries written starting at `offset`.
  */
  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  count=1;
  number_children=cube_info->associate_alpha != MagickFalse ? 16UL : 8UL;
  for (i=0; i < (ssize_t) number_children; i++)
  {
    if (node_info->child[i] == (NodeInfo *) NULL)
      continue;
    count+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+count,
      quantize_error);
  }
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[i]);
  /*
    Nodes whose quantization error is at or below the current pruning
    threshold are merged into their parents.  Surviving nodes update the
    running count of output colors and the minimum remaining error, which
    becomes the pruning threshold for the next pass (see
    ReduceImageColors()).
  */
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(cube_info,node_info);
  else
    {
      /*
        Find minimum pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  double
    *p,
    *q;

  /*
    qsort() comparator for quantization errors, ascending.  The
    near-equality test must come first:  the previous ordering checked
    (*p > *q) before the epsilon test, so for 0 < *p-*q <= MagickEpsilon
    it returned 1 for (p,q) but 0 for (q,p) -- not the strict weak
    ordering qsort() requires (undefined behavior per C11 7.22.5).
  */
  p=(double *) error_p;
  q=(double *) error_q;
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  return(*p > *q ? 1 : -1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors:  flatten
        every node's quantization error into a sorted array and jump the
        starting threshold to roughly the (nodes - 110% of target) largest
        error, so the first Reduce() pass prunes most excess nodes at once.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
  }
  /*
    Repeatedly prune:  each Reduce() pass removes all nodes at or below
    the threshold and records the minimum surviving error as the next
    threshold, until the color count fits the target.  `span` (the
    initial count) scales the progress report.
  */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Classify the colors of the reference image, then assign the resulting
    palette to the target image.  The output color count is pinned to the
    number of colors actually found in the reference.
  */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image, then assign the
        shared palette to every image in the sequence, stopping on the
        first failure.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  /*
    qsort() comparator:  order colormap entries by pixel intensity,
    ascending.  Return only the sign of the difference:  the previous
    `(int) intensity` cast truncated sub-unit differences to 0 and is
    undefined behavior when the difference does not fit in an int.
  */
  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity > 0.0)
    return(1);
  if (intensity < 0.0)
    return(-1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  register ssize_t
    i;

  size_t
    extent;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /*
    colormap_index maps a scaled intensity (or an original colormap slot)
    to an output colormap slot; size it for the larger of the two uses.
  */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass input:  build a colormap on the fly, one entry per
        distinct intensity.  memset with -1 sets every byte to 0xFF, i.e.
        every ssize_t slot to -1 ("not yet assigned").
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          /*
            Grayscale pixel, so the red channel carries the intensity.
            Double-checked locking:  only enter the critical section (and
            possibly extend the colormap) for intensities not seen yet.
          */
          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
               }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity, deduplicate equivalent entries, and
    record each old slot's new position.  The alpha channel is borrowed
    to carry the original index through qsort().
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Remap every pixel's index through colormap_index to match the new,
    compacted colormap.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
|
dvr.h | // name your method
#ifndef METHODS_DVR_H
#define METHODS_DVR_H
// include only the necessary header files
#include "cwa.h"
#include "quartz_internal/propagate.h"
#include "details/math/polynomial.h"
#include "details/math/constants.h"
#include "details/math/space.h"
#include "util/member_function_wrapper.h"
namespace method {
// use your method name to create a subspace for your
// implementation of details
namespace dvr {
namespace details {
inline
cx_double kinetic_matrix_element(const long long i,
                                 const long long j,
                                 const double interval,
                                 const double mass) {
  // One-dimensional DVR kinetic-energy matrix element between grid
  // points i and j, for grid spacing `interval` and particle `mass`.
  if (i == j) {
    // Diagonal element: pi^2 / (6 m dx^2).
    return cx_double{
        math::pi * math::pi / 6. / mass / interval / interval, 0.};
  }
  // Off-diagonal element: (-1)^(i-j) / (m dx^2 (i-j)^2).
  // (-1)^k for integer k is just a parity sign; avoid the transcendental
  // std::pow call, which is evaluated O(N^2) times when matrices are built.
  const double sign = ((i - j) % 2 == 0) ? 1.0 : -1.0;
  const double diff = (double) (i - j);
  return cx_double{sign / mass / interval / interval / diff / diff, 0.};
}
inline
cx_double momentum_matrix_element(const long long i,
                                  const long long j,
                                  const double interval) {
  // One-dimensional DVR momentum matrix element between grid points i
  // and j; purely imaginary off the diagonal, zero on it.
  if (i == j) {
    return cx_double{0., 0.};
  }
  // Off-diagonal element: -i * (-1)^(i-j) / (dx (i-j)).
  // Replace std::pow(-1, k) with a parity test (identical result,
  // cheaper in the O(N^2) matrix-assembly loops).
  const double sign = ((i - j) % 2 == 0) ? 1.0 : -1.0;
  return -cx_double{0., sign / interval / (double) (i - j)};
}
inline
arma::cx_cube momentum_matrices(const arma::uvec & grid,
                                const arma::mat & ranges,
                                const arma::uword dim) {
  // Build one dense momentum matrix per dimension:  slice k holds the
  // DVR momentum operator along dimension k, represented on the full
  // direct-product grid of prod(grid) points.
  //
  // NOTE(review): element (i,j,k) depends only on the k-th index of i
  // and j, with no Kronecker delta on the remaining dimensions --
  // confirm this matches the intended tensor-product operator structure.
  const long long narrowed_dim = arma::prod(grid);
  arma::cx_cube result = arma::cx_cube(narrowed_dim, narrowed_dim, dim);
  // Grid spacing per dimension: (upper - lower) / (n_points - 1).
  const arma::vec intervals = (ranges.col(1) - ranges.col(0))
                              / (grid - arma::ones(grid.n_elem));
  // Table for converting a flat index into per-dimension indices.
  const auto table = math::space::grids_to_table(grid);
#pragma omp parallel for
  for (arma::uword k = 0; k < dim; k++) {
    for (long long i = 0; i < narrowed_dim; i++) {
      for (long long j = 0; j < narrowed_dim; j++) {
        result(i, j, k) = momentum_matrix_element(
            math::space::index_to_indices(i, table)[k],
            math::space::index_to_indices(j, table)[k],
            intervals(k));
      }
    }
  }
  return result;
}
inline
arma::cube position_matrices(const arma::mat & points) {
  // One diagonal position matrix per dimension:  slice d is
  // diag(points.row(d)), sized (n_points x n_points), since the position
  // operator is diagonal in the DVR (grid) basis.
  const arma::uword n_points = points.n_cols;
  const arma::uword n_dims = points.n_rows;
  arma::cube result(n_points, n_points, n_dims);
#pragma omp parallel for
  for (arma::uword d = 0; d < n_dims; d++) {
    result.slice(d) = arma::diagmat(points.row(d));
  }
  return result;
}
} // namespace details
struct State {
public:
arma::mat points;
arma::cx_vec coefs;
arma::uvec grid;
arma::mat ranges;
arma::vec masses;
arma::cube positional_matrices;
arma::cx_cube momentum_matrices;
// Establish an easy way to construct your State
template<typename Wavefunction>
// Construct a DVR state by sampling the wavefunction `initial` on a
// regular direct-product grid.
//
// initial: wavefunction functor, evaluated at every grid point via at().
// grid:    number of points along each dimension.
// range:   per-dimension [lower, upper] bounds, one row per dimension.
// masses:  per-dimension particle masses.
//
// Throws Error when grid/range or grid/masses dimensions disagree.
// Members initialize in declaration order, so `points` is generated
// before the matrices that are built from it.
template<typename Wavefunction>
State(const Wavefunction & initial,
      const arma::uvec & grid,
      const arma::mat & range,
      const arma::vec & masses) :
    points(math::space::points_generate(grid, range)),
    coefs(arma::conv_to<arma::cx_vec>::from(at(initial, points))),
    grid(grid),
    ranges(range),
    masses(masses),
    positional_matrices(dvr::details::position_matrices(points)),
    momentum_matrices(
        dvr::details::momentum_matrices(grid, range, range.n_rows)) {
  if (grid.n_rows != ranges.n_rows) {
    throw Error("Different dimension between the grid and the range");
  }
  if (grid.n_rows != masses.n_rows) {
    throw Error("Different dimension between the grid and the masses");
  }
}
// Same as the constructor above, but with every mass defaulted to 1
// (one entry per dimension).  No grid/masses consistency check is
// needed since the masses vector is sized from the grid itself.
template<typename Wavefunction>
State(const Wavefunction & initial,
      const arma::uvec & grid,
      const arma::mat & range) :
    points(math::space::points_generate(grid, range)),
    coefs(arma::conv_to<arma::cx_vec>::from(at(initial, points))),
    grid(grid),
    ranges(range),
    masses(arma::ones<arma::vec>(grid.n_elem)),
    positional_matrices(dvr::details::position_matrices(points)),
    momentum_matrices(
        dvr::details::momentum_matrices(grid, range, range.n_rows)) {
  if (grid.n_rows != ranges.n_rows) {
    throw Error("Different dimension between the grid and the range");
  }
}
// Construct a DVR state directly from expansion coefficients on the
// grid, with unit masses.
//
// Bug fix: `points` was previously left default-constructed (empty), so
// positional_matrices was built from an empty matrix and any use of
// points/positional_matrices on a State built this way was degenerate.
// Generate the grid points from (grid, range) exactly as the other
// constructors do; members initialize in declaration order, so `points`
// exists before positional_matrices is built from it.
inline
State(const arma::cx_vec & coefs,
      const arma::uvec & grid,
      const arma::mat & range) :
    points(math::space::points_generate(grid, range)),
    coefs(coefs),
    grid(grid),
    ranges(range),
    masses(arma::ones<arma::vec>(grid.n_elem)),
    positional_matrices(dvr::details::position_matrices(points)),
    momentum_matrices(
        dvr::details::momentum_matrices(grid, range, range.n_rows)) {
  if (grid.n_rows != ranges.n_rows) {
    throw Error("Different dimension between the grid and the range");
  }
}
inline
arma::cx_mat kinetic_energy_matrix() const {
  // Assemble the total kinetic-energy matrix on the direct-product grid:
  // for each pair of flat indices (i, j), sum the one-dimensional DVR
  // kinetic elements over every dimension k.
  //
  // NOTE(review): the per-dimension element is accumulated without a
  // Kronecker delta on the other dimensions' indices -- confirm this is
  // the intended tensor-product structure.
  const long long narrowed_dim = arma::prod(this->grid);
  arma::cx_mat result = arma::zeros<arma::cx_mat>(narrowed_dim, narrowed_dim);
  // Grid spacing per dimension: (upper - lower) / (n_points - 1).
  const arma::vec intervals = (this->ranges.col(1) - this->ranges.col(0))
                              / (this->grid - arma::ones(this->grid.n_elem));
  const auto table = math::space::grids_to_table(this->grid);
  // Each OpenMP thread owns a distinct set of rows i, so the += on
  // result(i, j) is race-free.
#pragma omp parallel for
  for (long long i = 0; i < narrowed_dim; i++) {
    for (long long j = 0; j < narrowed_dim; j++) {
      for (arma::uword k = 0; k < this->grid.n_elem; k++) {
        result(i, j) += details::kinetic_matrix_element(
            math::space::index_to_indices(i, table)[k],
            math::space::index_to_indices(j, table)[k],
            intervals(k),
            this->masses(k));
      }
    }
  }
  return result;
}
// Full Hamiltonian H = T + V on the grid:  the potential is diagonal in
// the position (DVR) basis, evaluated at every grid point via at().
template<typename Potential>
arma::cx_mat hamiltonian_matrix(const Potential & potential) const {
  const arma::vec potential_diag = at(potential, this->points);
  return this->kinetic_energy_matrix() + arma::diagmat(potential_diag);
}
inline
arma::vec positional_expectation() const {
  // <x_d> = <psi| X_d |psi> / <psi|psi> for every dimension d.
  arma::vec expectation = arma::vec(this->dim());
#pragma omp parallel for
  for (arma::uword d = 0; d < expectation.n_elem; d++) {
    // Expectation of a Hermitian operator:  the imaginary part should
    // vanish up to numerical noise.
    const cx_double value = arma::cdot(
        this->coefs, this->positional_matrices.slice(d) * this->coefs);
    assert(std::abs(value.imag()) < 1e-6);
    expectation(d) = std::real(value);
  }
  // Normalize by the squared norm of the (possibly unnormalized) state.
  return expectation / this->norm() / this->norm();
}
inline
arma::vec momentum_expectation() const {
  // <p_d> = <psi| P_d |psi> / <psi|psi> for every dimension d.
  arma::vec result = arma::vec(this->dim());
#pragma omp parallel for
  for (arma::uword i = 0; i < result.n_elem; i++) {
    // Expectation of a Hermitian operator:  the imaginary part should
    // vanish up to numerical noise.
    const cx_double dimension_result =
        arma::cdot(this->coefs,
                   this->momentum_matrices.slice(i) * this->coefs);
    assert(std::abs(dimension_result.imag()) < 1e-6);
    result(i) = std::real(dimension_result);
  }
  // Normalize by the squared norm of the (possibly unnormalized) state.
  return result / this->norm() / this->norm();
}
inline
// Wigner transform of the wave function onto a phase-space grid:
//   W(X, P) ~ sum_Y exp(-2i P.Y) conj(psi(X-Y)) psi(X+Y) / (2*pi)^dim
// The position part of the phase-space grid reuses this->grid/ranges; the
// momentum part is supplied by the caller.
// Throws Error if the momentum grid and ranges disagree in dimension.
cwa::State wigner_transform(
    const arma::uvec & momentum_space_grid,
    const arma::mat & momentum_space_ranges) const {
  if(momentum_space_grid.n_elem != momentum_space_ranges.n_rows) {
    throw Error("Different dimension between the grid and range provided");
  }
  const arma::uvec phase_space_grid = arma::join_cols(this->grid, momentum_space_grid);
  const arma::mat phase_space_range = arma::join_cols(this->ranges,
                                                      momentum_space_ranges);
  const arma::uvec real_space_table = math::space::grids_to_table(this->grid);
  const arma::umat phase_space_iterations =
      math::space::auto_iteration_over_dims(phase_space_grid);
  // Only half the Y offsets are iterated: Y and -Y give complex-conjugate
  // contributions, so non-zero Y terms are doubled below.
  const arma::umat Y_iterations =
      math::space::auto_iteration_over_dims(this->grid / 2 + 1);
  const arma::mat phase_space_points =
      math::space::points_generate(phase_space_grid, phase_space_range);
  const arma::vec scaling =
      (phase_space_range.col(1) - phase_space_range.col(0)) / (phase_space_grid - 1);
  // Loop-invariant normalization (2*pi)^dim, hoisted out of the hot loops.
  const double normalization = std::pow(2.0 * math::pi, this->dim());
  arma::vec weights(phase_space_points.n_cols, arma::fill::zeros);
  #pragma omp parallel for
  for (arma::uword i = 0; i < weights.n_elem; i++) {
    // Split the phase-space point into its position indices X and
    // momentum values P.
    const arma::uvec X = phase_space_iterations.col(i).rows(0, this->dim()-1);
    const arma::vec P = phase_space_points.col(i).rows(this->dim(), 2*this->dim()-1);
    for (arma::uword j = 0; j < Y_iterations.n_cols; j++) {
      const arma::uvec Y = Y_iterations.col(j);
      const arma::vec Y_num = Y % scaling.rows(0, this->dim() - 1);
      // Discard offsets that would step outside the real-space grid.
      const arma::uvec X_less_than_Y = arma::find(X<Y);
      const arma::uvec X_plus_Y_greater_than_grid = arma::find(X + Y > this->grid - 1);
      if(X_less_than_Y.n_elem == 0 && X_plus_Y_greater_than_grid.n_elem == 0) {
        const arma::uvec X_minus_Y = X-Y;
        const arma::uvec X_plus_Y = X+Y;
        const arma::uword X_minus_Y_index =
            math::space::indices_to_index(X_minus_Y,real_space_table);
        const arma::uword X_plus_Y_index =
            math::space::indices_to_index(X_plus_Y,real_space_table);
        // Y == 0 contributes once; any other Y stands in for the pair
        // {Y, -Y}, whence the factor 2 (imaginary parts cancel).
        const arma::uvec non_zero_Y = arma::find(Y);
        const double multiplier = (non_zero_Y.n_elem == 0) ? 1.0 : 2.0;
        const double term = multiplier * std::real(
            std::exp(- 2.0 * cx_double{0.0,1.0} * arma::dot(P,Y_num)) *
            std::conj(this->coefs(X_minus_Y_index)) * this->coefs(X_plus_Y_index));
        weights(i) += term / normalization;
      }
    }
  }
  return cwa::State(phase_space_points, weights, this->masses);
}
template<typename Function>
// Expectation values of several phase-space observables, computed on the
// Wigner-transformed (classical) representation of this state.
arma::vec expectation(const std::vector<Function> & observables) const {
  return this->wigner_transform().expectation(observables);
}
template<typename Function>
double expectation(const Function & observable) const {
const cwa::State transformed = this->wigner_transform();
const double result = transformed.expectation(observable);
return result;
}
inline
// Convenience overload: sample momenta on the same grid and ranges used
// for the positions.
cwa::State wigner_transform() const {
  return this->wigner_transform(this->grid, this->ranges);
}
inline
// Number of spatial dimensions (one grid entry per dimension).
arma::uword dim() const {
  return this->grid.n_elem;
}
inline
// L2 norm of the coefficient vector; expectation values divide by norm^2.
double norm() const {
  return arma::norm(this->coefs);
}
};
// Wraps a complex operator matrix (typically a Hamiltonian built from a
// State) and supports application to a State plus basic operator algebra.
struct Operator {
private:
  // Propagation scheme this operator targets. Currently always
  // Schrotinger within this file, but kept as state so future schemes
  // can be carried by the object.
  PropagationType type = Schrotinger;
public:
  arma::Mat<cx_double> hamiltonian;
  // Build the Hamiltonian matrix for `state` under `potential`.
  template<typename Potential>
  Operator(const State & state,
           const Potential & potential) :
      hamiltonian(state.hamiltonian_matrix(potential)) {}
  // Wrap an arbitrary matrix, promoting its entries to complex.
  template<typename T>
  Operator(const arma::Mat<T> & operator_matrix) :
      hamiltonian(arma::conv_to<arma::cx_mat>::from(operator_matrix)) {}
  inline
  PropagationType propagation_type() const {
    // Report the stored member rather than a hard-coded constant, so the
    // private `type` field cannot silently disagree with what callers
    // observe. (Behavior is unchanged: `type` is always Schrotinger here.)
    return this->type;
  }
  // Apply the operator: |state'> = H |state>. Takes the State by value and
  // returns the transformed copy.
  State operator()(State state) const {
    state.coefs = this->hamiltonian * state.coefs;
    return state;
  }
  Operator operator+(const Operator & B) const {
    const arma::cx_mat new_mat = this->hamiltonian + B.hamiltonian;
    return Operator(new_mat);
  }
  Operator operator-(const Operator & B) const {
    const arma::cx_mat new_mat = this->hamiltonian - B.hamiltonian;
    return Operator(new_mat);
  }
  Operator operator*(const Operator & B) const {
    const arma::cx_mat new_mat = this->hamiltonian * B.hamiltonian;
    return Operator(new_mat);
  }
  // Right-multiplication by a scalar or anything armadillo accepts.
  template<typename T>
  Operator operator*(const T & B) const {
    const arma::cx_mat new_mat = this->hamiltonian * B;
    return Operator(new_mat);
  }
  inline
  Operator inv() const {
    const arma::cx_mat inversed = arma::inv(this->hamiltonian);
    return Operator(inversed);
  }
};
} // namespace dvr
}
#endif //METHODS_DVR_H |
no_option.c | // RUN: %clang_cc1 -verify -o - %s
// expected-no-diagnostics
int a;
/* The RUN line invokes clang without -fopenmp, so both OpenMP pragmas below
   are ignored entirely -- no diagnostic is emitted even though 'b' is
   undeclared and the parallel pragma is not followed by a statement. */
#pragma omp threadprivate(a,b)
#pragma omp parallel
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelFxImage() applies a channel expression to the specified image. The
% expression consists of one or more channels, either mnemonic or numeric (e.g.
% red, 1), separated by actions as follows:
%
% <=> exchange two channels (e.g. red<=>blue)
% => copy one channel to another channel (e.g. red=>green)
% = assign a constant value to a channel (e.g. red=50%)
% , write new image channels in the specified order (e.g. red, green)
% | add a new output image for the next set of channel operations
% ; move to the next input image for the source of channel data
%
% For example, to create 3 grayscale images from the red, green, and blue
% channels of an image, use:
%
% -channel-fx "red; green; blue"
%
% A channel without an operation symbol implies separate (i.e., semicolon).
%
% The format of the ChannelFxImage method is:
%
% Image *ChannelFxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A channel expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef enum
{
ExtractChannelOp,
AssignChannelOp,
ExchangeChannelOp,
TransferChannelOp
} ChannelFx;
/*
  ChannelImage() writes one channel of destination_image row by row: for
  AssignChannelOp it stores the constant `pixel`, otherwise it copies
  source_channel from source_image.  Returns MagickTrue on success,
  MagickFalse when any row cannot be read or written.
*/
static MagickBooleanType ChannelImage(Image *destination_image,
  const PixelChannel destination_channel,const ChannelFx channel_op,
  const Image *source_image,const PixelChannel source_channel,
  const Quantum pixel,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;
  MagickBooleanType
    status;
  size_t
    height,
    width;
  ssize_t
    y;
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source_image,exception);
  destination_view=AcquireAuthenticCacheView(destination_image,exception);
  /* Operate only on the region the two images have in common. */
  height=MagickMin(source_image->rows,destination_image->rows);
  width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(source_image,source_image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelTrait
      destination_traits,
      source_traits;
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /* A failed row elsewhere aborts remaining rows (loop may be parallel). */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(destination_view,0,y,
      destination_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    destination_traits=GetPixelChannelTraits(destination_image,
      destination_channel);
    source_traits=GetPixelChannelTraits(source_image,source_channel);
    /* Skip rows when either channel is absent from its image. */
    if ((destination_traits == UndefinedPixelTrait) ||
        (source_traits == UndefinedPixelTrait))
      continue;
    for (x=0; x < (ssize_t) width; x++)
    {
      if (channel_op == AssignChannelOp)
        SetPixelChannel(destination_image,destination_channel,pixel,q);
      else
        SetPixelChannel(destination_image,destination_channel,
          GetPixelChannel(source_image,source_channel,p),q);
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(destination_image);
    }
    if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
/*
  ChannelFxImage() parses the channel expression (grammar documented in the
  comment block above) token by token, applying each channel operation via
  ChannelImage().  Returns the head of the resulting image list, or NULL on
  failure.
*/
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define ChannelFxImageTag "ChannelFx/Image"
  ChannelFx
    channel_op;
  ChannelType
    channel_mask;
  char
    token[MagickPathExtent];
  const char
    *p;
  const Image
    *source_image;
  double
    pixel;
  Image
    *destination_image;
  MagickBooleanType
    status;
  PixelChannel
    source_channel,
    destination_channel;
  ssize_t
    channels;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_image=image;
  destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  /* No expression: the unmodified clone is the result. */
  if (expression == (const char *) NULL)
    return(destination_image);
  status=SetImageStorageClass(destination_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      destination_image=GetLastImageInList(destination_image);
      return((Image *) NULL);
    }
  destination_channel=RedPixelChannel;
  channel_mask=UndefinedChannel;
  pixel=0.0;
  p=(char *) expression;
  GetNextToken(p,&p,MagickPathExtent,token);
  channel_op=ExtractChannelOp;
  for (channels=0; *token != '\0'; )
  {
    ssize_t
      i;
    /*
      Interpret channel expression.
    */
    switch (*token)
    {
      case ',':
      {
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case '|':
      {
        /* '|': advance to the next source image (wrapping to the first). */
        if (GetNextImageInList(source_image) != (Image *) NULL)
          source_image=GetNextImageInList(source_image);
        else
          source_image=GetFirstImageInList(source_image);
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case ';':
      {
        /* ';': finalize the current output image and start a new one. */
        Image
          *canvas;
        (void) SetPixelChannelMask(destination_image,channel_mask);
        if ((channel_op == ExtractChannelOp) && (channels == 1))
          {
            /* A single extracted channel becomes a grayscale image. */
            (void) SetPixelMetaChannels(destination_image,0,exception);
            (void) SetImageColorspace(destination_image,GRAYColorspace,
              exception);
          }
        canvas=CloneImage(source_image,0,0,MagickTrue,exception);
        if (canvas == (Image *) NULL)
          {
            destination_image=DestroyImageList(destination_image);
            return(destination_image);
          }
        AppendImageToList(&destination_image,canvas);
        destination_image=GetLastImageInList(destination_image);
        status=SetImageStorageClass(destination_image,DirectClass,exception);
        if (status == MagickFalse)
          {
            destination_image=GetLastImageInList(destination_image);
            return((Image *) NULL);
          }
        GetNextToken(p,&p,MagickPathExtent,token);
        channels=0;
        destination_channel=RedPixelChannel;
        channel_mask=UndefinedChannel;
        break;
      }
      default:
        break;
    }
    i=ParsePixelChannelOption(token);
    if (i < 0)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedChannelType","`%s'",token);
        destination_image=DestroyImageList(destination_image);
        return(destination_image);
      }
    source_channel=(PixelChannel) i;
    channel_op=ExtractChannelOp;
    GetNextToken(p,&p,MagickPathExtent,token);
    /* Recognize the operator: "<=>" exchange, "=" assign, "=>" transfer. */
    if (*token == '<')
      {
        channel_op=ExchangeChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '=')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=AssignChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '>')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=TransferChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    switch (channel_op)
    {
      case AssignChannelOp:
      case ExchangeChannelOp:
      case TransferChannelOp:
      {
        if (channel_op == AssignChannelOp)
          pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
        else
          {
            /* Right-hand side of the operator names the destination. */
            i=ParsePixelChannelOption(token);
            if (i < 0)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"UnrecognizedChannelType","`%s'",token);
                destination_image=DestroyImageList(destination_image);
                return(destination_image);
              }
          }
        destination_channel=(PixelChannel) i;
        if (i >= (ssize_t) GetPixelChannels(destination_image))
          (void) SetPixelMetaChannels(destination_image,(size_t) (
            destination_channel-GetPixelChannels(destination_image)+1),
            exception);
        if (image->colorspace != UndefinedColorspace)
          switch (destination_channel)
          {
            case RedPixelChannel:
            case GreenPixelChannel:
            case BluePixelChannel:
            case BlackPixelChannel:
            case IndexPixelChannel:
              break;
            case AlphaPixelChannel:
            {
              destination_image->alpha_trait=BlendPixelTrait;
              break;
            }
            case ReadMaskPixelChannel:
            {
              destination_image->read_mask=MagickTrue;
              break;
            }
            case WriteMaskPixelChannel:
            {
              destination_image->write_mask=MagickTrue;
              break;
            }
            case MetaPixelChannel:
            default:
            {
              (void) SetPixelMetaChannels(destination_image,(size_t) (
                destination_channel-GetPixelChannels(destination_image)+1),
                exception);
              break;
            }
          }
        channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
        /* Writing beyond a single gray channel forces an sRGB canvas. */
        if (((channels >= 1) || (destination_channel >= 1)) &&
            (IsGrayColorspace(destination_image->colorspace) != MagickFalse))
          (void) SetImageColorspace(destination_image,sRGBColorspace,exception);
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      default:
        break;
    }
    status=ChannelImage(destination_image,destination_channel,channel_op,
      source_image,source_channel,ClampToQuantum(pixel),exception);
    if (status == MagickFalse)
      {
        destination_image=DestroyImageList(destination_image);
        break;
      }
    channels++;
    if (channel_op == ExchangeChannelOp)
      {
        /* Exchange is two transfers: the reverse copy happens here. */
        status=ChannelImage(destination_image,source_channel,channel_op,
          source_image,destination_channel,ClampToQuantum(pixel),exception);
        if (status == MagickFalse)
          {
            destination_image=DestroyImageList(destination_image);
            break;
          }
        channels++;
      }
    switch (channel_op)
    {
      case ExtractChannelOp:
      {
        channel_mask=(ChannelType) (channel_mask | (1 << destination_channel));
        destination_channel=(PixelChannel) (destination_channel+1);
        break;
      }
      default:
        break;
    }
    status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
      strlen(expression));
    if (status == MagickFalse)
      break;
  }
  (void) SetPixelChannelMask(destination_image,channel_mask);
  if ((channel_op == ExtractChannelOp) && (channels == 1))
    {
      (void) SetPixelMetaChannels(destination_image,0,exception);
      (void) SetImageColorspace(destination_image,GRAYColorspace,exception);
    }
  return(GetFirstImageInList(destination_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *images,const ColorspaceType colorspace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o colorspace: the image colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CombineImages() assigns the gray value of each image in the sequence, in
  order, to successive channels of a single combined image.  Returns the
  combined image or NULL on failure.
*/
MagickExport Image *CombineImages(const Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"
  CacheView
    *combine_view;
  Image
    *combine_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Ensure the image are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
    {
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(combine_image,colorspace == UndefinedColorspace ?
    sRGBColorspace : colorspace,exception);
  /*
    Enable alpha when more images are supplied than the colorspace has
    color channels.
  */
  switch (combine_image->colorspace)
  {
    case UndefinedColorspace:
    case sRGBColorspace:
    {
      if (GetImageListLength(image) > 3)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case GRAYColorspace:
    {
      if (GetImageListLength(image) > 1)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case CMYKColorspace:
    {
      if (GetImageListLength(image) > 4)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    default:
      break;
  }
  /*
    Combine images.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;
    const Image
      *next;
    Quantum
      *pixels;
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      i;
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
    {
      register ssize_t
        x;
      PixelChannel channel=GetPixelChannelChannel(combine_image,i);
      PixelTrait traits=GetPixelChannelTraits(combine_image,channel);
      if (traits == UndefinedPixelTrait)
        continue;
      if (next == (Image *) NULL)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          /*
            Release the view before skipping this channel; continuing
            without destroying it leaked the cache view.
          */
          image_view=DestroyCacheView(image_view);
          continue;
        }
      q=pixels;
      for (x=0; x < (ssize_t) combine_image->columns; x++)
      {
        if (x < (ssize_t) next->columns)
          {
            q[i]=GetPixelGray(next,p);
            p+=GetPixelChannels(next);
          }
        q+=GetPixelChannels(combine_image);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,CombineImageTag,progress++,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  /*
    An image carries an active alpha channel exactly when its alpha trait
    is defined.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImage() separates a channel from the image and returns it as a
% grayscale image.
%
% The format of the SeparateImage method is:
%
% Image *SeparateImage(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SeparateImage() extracts the channels selected by channel_type into a new
  grayscale image (later-listed channels overwrite earlier ones per pixel).
  Returns the grayscale image or NULL on failure.
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag "Separate/Image"
  CacheView
    *image_view,
    *separate_view;
  Image
    *separate_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Initialize separate image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
    {
      separate_image=DestroyImage(separate_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(separate_image,GRAYColorspace,exception);
  separate_image->alpha_trait=UndefinedPixelTrait;
  /*
    Separate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      /* Pixels masked against writing keep the background color. */
      if (GetPixelWriteMask(image,p) == 0)
        {
          SetPixelBackgoundColor(separate_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(separate_image);
          continue;
        }
      SetPixelChannel(separate_image,GrayPixelChannel,0,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        /* Copy only channels present in the image and selected by the mask. */
        if ((traits == UndefinedPixelTrait) ||
            (GetChannelBit(channel_type,channel) == 0))
          continue;
        SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(separate_image);
    }
    if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SeparateImage)
#endif
        proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  separate_view=DestroyCacheView(separate_view);
  image_view=DestroyCacheView(image_view);
  (void) SetImageChannelMask(separate_image,DefaultChannels);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SeparateImages() builds an image list containing one grayscale image per
  updatable channel of `image`; if no channel qualifies, a single image
  separated with UndefinedChannel is returned.
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;
  register ssize_t
    i;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel=GetPixelChannelChannel(image,i);
    PixelTrait traits=GetPixelChannelTraits(image,channel);
    /* Only channels flagged for update are separated. */
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    separate_image=SeparateImage(image,(ChannelType) (1 << channel),exception);
    if (separate_image != (Image *) NULL)
      AppendImageToList(&images,separate_image);
  }
  if (images == (Image *) NULL)
    images=SeparateImage(image,UndefinedChannel,exception);
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
% DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
% OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
% and TransparentAlphaChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  FlattenPixelInfo() composes pixel p (with alpha `alpha`) over pixel q
  (with alpha `beta`) using the standard "over" operator and stores the
  result in `composite`.
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;
  register ssize_t
    i;
  /*
    Compose pixel p over pixel q with the given alpha.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta;  /* was terminated by a stray comma operator */
  gamma=Sa*(-Da)+Sa+Da;  /* composite alpha: Sa+Da-Sa*Da */
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel=GetPixelChannelChannel(image,i);
    PixelTrait traits=GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
status=MagickTrue;
switch (alpha_type)
{
case ActivateAlphaChannel:
{
image->alpha_trait=BlendPixelTrait;
break;
}
case AssociateAlphaChannel:
{
/*
Associate alpha.
*/
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
register ssize_t
i;
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
gamma=QuantumScale*GetPixelAlpha(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (channel == AlphaPixelChannel)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(gamma*q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->alpha_trait=CopyPixelTrait;
return(status);
}
case BackgroundAlphaChannel:
{
/*
Set transparent pixels to background color.
*/
if (image->alpha_trait == UndefinedPixelTrait)
break;
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelAlpha(image,q) == TransparentAlpha)
{
SetPixelViaPixelInfo(image,&image->background_color,q);
SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
case CopyAlphaChannel:
{
image->alpha_trait=UpdatePixelTrait;
status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
exception);
break;
}
case DeactivateAlphaChannel:
{
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
image->alpha_trait=CopyPixelTrait;
break;
}
case DisassociateAlphaChannel:
{
/*
Disassociate alpha.
*/
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image->alpha_trait=BlendPixelTrait;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma,
Sa;
register ssize_t
i;
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,q);
gamma=PerceptibleReciprocal(Sa);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (channel == AlphaPixelChannel)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(gamma*q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->alpha_trait=UndefinedPixelTrait;
return(status);
}
case DiscreteAlphaChannel:
{
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
image->alpha_trait=UpdatePixelTrait;
break;
}
case ExtractAlphaChannel:
{
status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case OffAlphaChannel:
{
image->alpha_trait=UndefinedPixelTrait;
break;
}
case OnAlphaChannel:
{
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
image->alpha_trait=BlendPixelTrait;
break;
}
case OpaqueAlphaChannel:
{
status=SetImageAlpha(image,OpaqueAlpha,exception);
break;
}
case RemoveAlphaChannel:
{
/*
Remove transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
break;
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
FlattenPixelInfo(image,&image->background_color,
image->background_color.alpha,q,(double)
GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->alpha_trait=image->background_color.alpha_trait;
break;
}
case SetAlphaChannel:
{
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
break;
}
case ShapeAlphaChannel:
{
/*
Set alpha channel by shape.
*/
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image->alpha_trait=UpdatePixelTrait;
(void) SetImageMask(image,WritePixelMask,image,exception);
(void) LevelImageColors(image,&image->background_color,
&image->background_color,MagickTrue,exception);
(void) SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
break;
}
case TransparentAlphaChannel:
{
status=SetImageAlpha(image,TransparentAlpha,exception);
break;
}
case UndefinedAlphaChannel:
break;
}
if (status == MagickFalse)
return(status);
(void) SetPixelChannelMask(image,image->channel_mask);
return(SyncImagePixelCache(image,exception));
}
|
GB_unop__lnot_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_int32_int32)
// op(A') function: GB (_unop_tran__lnot_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__lnot_int32_int32)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // cij = !(aij != 0), i.e. 1 when the entry is zero, 0 otherwise
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: all anz values in Ax are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (Ax [p] == 0) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            Cx [p] = (Ax [p] == 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// All the work is done by the shared template in GB_unop_transpose.c,
// specialized through the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__lnot_int32_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bitcoin_fmt_plug.c | /* bitcoin-qt (bitcoin) wallet cracker patch for JtR. Hacked together during
* April of 2013 by Dhiru Kholia <dhiru at openwall dot com>.
*
* Also works for Litecoin-Qt (litecoin) wallet files!
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru at openwall dot com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* This cracks password protected bitcoin (bitcoin-qt) "wallet" files.
*
* bitcoin => https://github.com/bitcoin/bitcoin
*
* Thanks to Solar for asking to add support for bitcoin wallet files.
*/
#include "arch.h"
#include <openssl/opensslv.h>
#if (AC_BUILT && HAVE_EVP_SHA512) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x0090708f)
#if FMT_EXTERNS_H
extern struct fmt_main fmt_bitcoin;
#elif FMT_REGISTERS_H
john_register_one(&fmt_bitcoin);
#else
#include <openssl/evp.h>
#include <string.h>
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "sha2.h"
#include "stdint.h"
#include "johnswap.h"
#include "sse-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 1
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Bitcoin"
#define FORMAT_NAME ""
#ifdef MMX_COEF_SHA512
#define ALGORITHM_NAME "SHA512 AES " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "SHA512 AES 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "SHA512 AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 64
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define SALT_SIZE sizeof(struct custom_salt)
#ifdef MMX_COEF_SHA512
#define MIN_KEYS_PER_CRYPT MMX_COEF_SHA512
#define MAX_KEYS_PER_CRYPT MMX_COEF_SHA512
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SZ 128
/*
 * Self-test vectors. Hash layout (hex lengths count hex characters, see
 * valid() below):
 *   $bitcoin$<len>$<cry_master>$<len>$<cry_salt>$<rounds>$<len>$<ckey>$<len>$<public_key>
 */
static struct fmt_tests bitcoin_tests[] = {
	/* bitcoin wallet hashes */
	{"$bitcoin$96$169ce74743c260678fbbba92e926198702fd84e46ba555190f6f3d82f6852e4adeaa340d2ac065288e8605f13d1d7c86$16$26049c64dda292d5$177864$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "openwall"},
	{"$bitcoin$96$bd97a08e00e38910550e76848949285b9702fe64460f70d464feb2b63f83e1194c745e58fa4a0f09ac35e5777c507839$16$26049c64dda292d5$258507$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "password"},
	{"$bitcoin$96$4eca412eeb04971428efec70c9e18fb9375be0aa105e7eec55e528d0ba33a07eb6302add36da86736054dee9140ec9b8$16$26049c64dda292d5$265155$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "strongpassword"},
	/* litecoin wallet hash */
	{"$bitcoin$96$54401984b32448917b6d18b7a11debe91d62aaa343ab62ed98e1d3063f30817832c744360331df94cbf1dcececf6d00e$16$bfbc8ee2c07bbb4b$194787$96$07a206d5422640cfa65a8482298ad8e8598b94d99e2c4ce09c9d015b734632778cb46541b8c10284b9e14e5468b654b9$66$03fe6587bf580ee38b719f0b8689c80d300840bbc378707dce51e6f1fe20f49c20", "isyourpasswordstronger"},
	{NULL}
};
/* Candidate passwords, NUL-terminated; filled by bitcoin_set_key() */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* any_cracked: nonzero once any candidate hit; cracked[]: per-index flags */
static int any_cracked, *cracked;
static size_t cracked_size;
/* Parsed "$bitcoin$..." fields; *_length values are in bytes (hex len / 2) */
static struct custom_salt {
	unsigned char cry_master[SZ];    /* encrypted master key */
	int cry_master_length;
	unsigned char cry_salt[SZ];      /* KDF salt */
	int cry_salt_length;
	int cry_rounds;                  /* SHA-512 KDF iteration count */
	unsigned char ckey[SZ];          /* parsed but not used by crypt_all() */
	int ckey_length;
	unsigned char public_key[SZ];    /* parsed but not used by crypt_all() */
	int public_key_length;
} *cur_salt;
/*
 * One-time format setup: size the key and result buffers to the (possibly
 * OpenMP-scaled) maximum number of keys handled per crypt_all() call.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	/* min scales by thread count only; max additionally by OMP_SCALE */
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc_tiny(cracked_size, MEM_ALIGN_WORD);
}
// #define BTC_DEBUG
#ifdef BTC_DEBUG
/* Debug helper: dump len bytes of str to stdout as lowercase hex. */
static void print_hex(unsigned char *str, int len)
{
	int pos = 0;

	while (pos < len) {
		printf("%02x", str[pos]);
		pos++;
	}
	printf("\n");
}
#endif
/* Return nonzero iff q consists only of hex digits (per the atoi16 table). */
static int ishex(char *q)
{
	for (; atoi16[ARCH_INDEX(*q)] != 0x7F; q++)
		;
	return *q == 0;
}
/*
 * Return nonzero iff q is a non-negative decimal integer in canonical form:
 * it must round-trip through atoi()/sprintf() unchanged (so no leading
 * zeros, sign, or trailing garbage), and must not start with '-'.
 */
static int isdec(char *q)
{
	char canon[24];

	sprintf(canon, "%d", atoi(q));
	return (*q != '-') && (strcmp(q, canon) == 0);
}
/*
 * Validate one "<len>$<hex>" field pair from the current strtok() stream.
 * len_tok is the already-consumed length token; the hex payload is pulled
 * with strtok(NULL, "$"). Returns 1 when the payload length matches the
 * announced length (which also validates the atoi() result), fits the
 * SZ-byte buffers, and is pure hex; 0 otherwise.
 */
static int valid_hex_pair(char *len_tok)
{
	char *p;
	int res;

	if (len_tok == NULL)
		return 0;
	res = atoi(len_tok);
	if ((p = strtok(NULL, "$")) == NULL)
		return 0;
	if (strlen(p) != res || strlen(p) > SZ * 2)
		return 0;
	return ishex(p);
}

/*
 * Sanity-check a ciphertext line of the form
 *   $bitcoin$<len>$<cry_master>$<len>$<cry_salt>$<rounds>$<len>$<ckey>$<len>$<public_key>
 * Returns 1 if well-formed, 0 otherwise. Works on a strdup()ed copy since
 * strtok() modifies its input.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;

	if (strncmp(ciphertext, "$bitcoin$", 9))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 9;
	if (!valid_hex_pair(strtok(ctcopy, "$")))   /* cry_master */
		goto err;
	if (!valid_hex_pair(strtok(NULL, "$")))     /* cry_salt */
		goto err;
	if ((p = strtok(NULL, "$")) == NULL)        /* cry_rounds */
		goto err;
	if (!isdec(p))
		goto err;
	if (!valid_hex_pair(strtok(NULL, "$")))     /* ckey */
		goto err;
	if (!valid_hex_pair(strtok(NULL, "$")))     /* public_key */
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/* Decode len bytes (2*len hex characters) from hex into out. */
static void bitcoin_decode_hex(unsigned char *out, int len, const char *hex)
{
	int i;

	for (i = 0; i < len; i++)
		out[i] = atoi16[ARCH_INDEX(hex[i * 2])] * 16 +
		         atoi16[ARCH_INDEX(hex[i * 2 + 1])];
}

/*
 * Parse an already-valid()ated ciphertext into a custom_salt. Returns a
 * pointer to a static buffer (the standard JtR pattern; the caller copies
 * it before the next call).
 */
static void *get_salt(char *ciphertext)
{
	char *p;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 9;	/* skip over "$bitcoin$" */
	p = strtok(ctcopy, "$");
	cs.cry_master_length = atoi(p) / 2;	/* hex length -> byte length */
	bitcoin_decode_hex(cs.cry_master, cs.cry_master_length, strtok(NULL, "$"));
	p = strtok(NULL, "$");
	cs.cry_salt_length = atoi(p) / 2;
	bitcoin_decode_hex(cs.cry_salt, cs.cry_salt_length, strtok(NULL, "$"));
	p = strtok(NULL, "$");
	cs.cry_rounds = atoi(p);
	p = strtok(NULL, "$");
	cs.ckey_length = atoi(p) / 2;
	bitcoin_decode_hex(cs.ckey, cs.ckey_length, strtok(NULL, "$"));
	p = strtok(NULL, "$");
	cs.public_key_length = atoi(p) / 2;
	bitcoin_decode_hex(cs.public_key, cs.public_key_length, strtok(NULL, "$"));
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Select the salt used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute one batch: for each candidate password, derive key+IV by
 * cry_rounds iterations of SHA-512 starting from SHA512(password || salt),
 * then try to AES-256-CBC-decrypt cry_master with the first 32 bytes as
 * key and the next 16 as IV. A candidate is marked cracked when the
 * decrypted master key is exactly 32 bytes after padding removal.
 * MMX_COEF_SHA512 builds hash several candidates per call with SIMD.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

	if (any_cracked) {
		/* clear per-index flags from the previous batch */
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char output[SZ];
		int fOk;
		SHA512_CTX sha_ctx;
		EVP_CIPHER_CTX ctx;
		int nPLen, nFLen;
		int i;
#ifdef MMX_COEF_SHA512
		//JTR_ALIGN(16) ARCH_WORD_64 key_iv[MMX_COEF_SHA512*SHA512_BUF_SIZ]; // 2 * 16 bytes == 2048 bits, i.e. two SHA blocks
		// the above alignment was crashing on OMP build on some 32 bit linux (compiler bug?? not aligning).
		// so the alignment was done using raw buffer, and aligning at runtime to get 16 byte alignment.
		// that works, and should cause no noticeable overhead differences.
		char unaligned_buf[MMX_COEF_SHA512*SHA512_BUF_SIZ*sizeof(ARCH_WORD_64)+16];
		ARCH_WORD_64 *key_iv = (ARCH_WORD_64*)mem_align(unaligned_buf, 16);
		JTR_ALIGN(8) unsigned char hash1[SHA512_DIGEST_LENGTH]; // 512 bits
		int index2;
		for (index2 = 0; index2 < MAX_KEYS_PER_CRYPT; index2++) {
			// The first hash for this password
			SHA512_Init(&sha_ctx);
			SHA512_Update(&sha_ctx, saved_key[index+index2], strlen(saved_key[index+index2]));
			SHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);
			SHA512_Final(hash1, &sha_ctx);
			// We need to set ONE time, the upper half of the data buffer. We put the 0x80 byte (in BE format), at offset
			// 512-bits (SHA512_DIGEST_LENGTH) multiplied by the MMX_COEF_SHA512 (same as MAX_KEYS_PER_CRYPT), then zero
			// out the rest of the buffer, putting 512 (#bits) at the end. Once this part of the buffer is set up, we never
			// touch it again, for the rest of the crypt. We simply overwrite the first half of this buffer, over and over
			// again, with BE results of the prior hash.
			key_iv[ SHA512_DIGEST_LENGTH/sizeof(ARCH_WORD_64) * MMX_COEF_SHA512 + index2 ] = 0x8000000000000000ULL;
			for (i = (SHA512_DIGEST_LENGTH/sizeof(ARCH_WORD_64)+1) * MMX_COEF_SHA512 + index2; i < 15*MMX_COEF_SHA512; i += MMX_COEF_SHA512)
				key_iv[i] = 0;
			key_iv[15*MMX_COEF_SHA512 + index2] = (SHA512_DIGEST_LENGTH << 3);
			// Now copy and convert hash1 from flat into MMX_COEF_SHA512 buffers.
			for (i = 0; i < SHA512_DIGEST_LENGTH/sizeof(ARCH_WORD_64); ++i) {
#if COMMON_DIGEST_FOR_OPENSSL
				key_iv[MMX_COEF_SHA512*i + index2] = sha_ctx.hash[i]; // this is in BE format
#else
				key_iv[MMX_COEF_SHA512*i + index2] = sha_ctx.h[i];
#endif
			}
		}
		for (i = 1; i < cur_salt->cry_rounds; i++) // start at 1; the first iteration is already done
			SSESHA512body(key_iv, key_iv, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
		// We must fixup final results. We have been working in BE (NOT switching out of, just to switch back into it at every loop).
		// Convert the first 6 words (48 bytes, all we need) of each hash back to LE.
		// NOTE(review): the helper is named alter_endianity_to_BE64 while the
		// comment above says "back to LE" — presumably the name is relative to
		// the host byte order; confirm against the helper's definition.
		alter_endianity_to_BE64(key_iv, 6 * MAX_KEYS_PER_CRYPT);
		for (index2 = 0; index2 < MAX_KEYS_PER_CRYPT; index2++) {
			unsigned char key[32];
			unsigned char iv[16];
			// Copy and convert from MMX_COEF_SHA512 buffers back into flat buffers
			for (i = 0; i < sizeof(key)/sizeof(ARCH_WORD_64); i++) // the derived key
				((ARCH_WORD_64 *)key)[i] = key_iv[MMX_COEF_SHA512*i + index2];
			for (i = 0; i < sizeof(iv)/sizeof(ARCH_WORD_64); i++) // the derived iv
				((ARCH_WORD_64 *)iv)[i] = key_iv[MMX_COEF_SHA512*(sizeof(key)/sizeof(ARCH_WORD_64) + i) + index2];
			/* NOTE: write our code instead of using following high-level OpenSSL functions */
			EVP_CIPHER_CTX_init(&ctx);
			fOk = EVP_DecryptInit_ex(&ctx, EVP_aes_256_cbc(), NULL, key, iv);
			if (fOk)
				fOk = EVP_DecryptUpdate(&ctx, output, &nPLen, cur_salt->cry_master, cur_salt->cry_master_length);
			if (fOk)
				fOk = EVP_DecryptFinal_ex(&ctx, output + nPLen, &nFLen);
			EVP_CIPHER_CTX_cleanup(&ctx);
			// a decrypted mkey is exactly 32 bytes in len; ossl has already checked the padding (16 0x0f's) for us
			if (fOk && nPLen + nFLen == 32) {
				cracked[index + index2] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
#else
		unsigned char key_iv[SHA512_DIGEST_LENGTH]; // buffer for both the derived key and iv
		SHA512_Init(&sha_ctx);
		SHA512_Update(&sha_ctx, saved_key[index], strlen(saved_key[index]));
		SHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);
		SHA512_Final(key_iv, &sha_ctx);
		for (i = 1; i < cur_salt->cry_rounds; i++) { // start at 1; the first iteration is already done
			SHA512_Init(&sha_ctx);
			SHA512_Update(&sha_ctx, key_iv, SHA512_DIGEST_LENGTH);
			SHA512_Final(key_iv, &sha_ctx);
		}
		/* NOTE: write our code instead of using following high-level OpenSSL functions */
		EVP_CIPHER_CTX_init(&ctx);
		fOk = EVP_DecryptInit_ex(&ctx, EVP_aes_256_cbc(), NULL, key_iv, key_iv+32);
		if (fOk)
			fOk = EVP_DecryptUpdate(&ctx, output, &nPLen, cur_salt->cry_master, cur_salt->cry_master_length);
		if (fOk)
			fOk = EVP_DecryptFinal_ex(&ctx, output + nPLen, &nFLen);
		EVP_CIPHER_CTX_cleanup(&ctx);
		// a decrypted mkey is exactly 32 bytes in len; ossl has already checked the padding (16 0x0f's) for us
		if (fOk && nPLen + nFLen == 32) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
#endif
	}
	return count;
}
/* Any hit in this crypt_all() batch? (flags were set inside crypt_all) */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

/* Per-candidate check: the flag was set during crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* No stored binary to compare; the decrypt test in crypt_all() is exact. */
static int cmp_exact(char *source, int index)
{
	return cracked[index];
}
/* Store a candidate password, truncated to PLAINTEXT_LENGTH bytes and
 * NUL-terminated. */
static void bitcoin_set_key(char *key, int index)
{
	strncpy(saved_key[index], key, PLAINTEXT_LENGTH);
	saved_key[index][PLAINTEXT_LENGTH] = 0;
}
/* Return the candidate stored at index by bitcoin_set_key(). */
static char *get_key(int index)
{
	return saved_key[index];
}
#if FMT_MAIN_VERSION
/* Tunable-cost report: this salt's SHA-512 iteration count (cry_rounds). */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;
	my_salt = salt;
	return (unsigned int)my_salt->cry_rounds;
}
#endif
/* John the Ripper format descriptor: parameter block, then method table. */
struct fmt_main fmt_bitcoin = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{
			"iteration count",
		},
#endif
		bitcoin_tests
	}, { /* methods */
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 9
#if FMT_MAIN_VERSION > 11
		{
			iteration_count,
		},
#endif
		fmt_default_source,
#endif
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		bitcoin_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* OpenSSL requirement */
|
trmv_x_bsr_n_hi.c | #include "alphasparse/kernel.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "alphasparse/opt.h"
#include <string.h>
#include "alphasparse/util.h"
/*
 * y := alpha * A * x + beta * y using only the UPPER-triangular part of a
 * square BSR matrix A: blocks strictly below the block diagonal are
 * skipped, and within the diagonal block only entries on/above the
 * diagonal contribute.
 *
 * Block rows are partitioned across threads by nonzero count, so each
 * thread writes a disjoint slice of y (no synchronization needed).
 * Returns ALPHA_SPARSE_STATUS_INVALID_VALUE for a non-square block
 * structure or an unknown block layout.
 *
 * (Unused locals m, n, val_orig from the original version removed.)
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_BSR *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    const ALPHA_INT thread_num = alpha_get_thread_num();
    const ALPHA_INT bs = A->block_size;
    const ALPHA_INT bs2 = bs * bs;
    ALPHA_INT b_rows = A->rows;
    ALPHA_INT b_cols = A->cols;

    /* triangular kernel requires a square block structure */
    if (b_rows != b_cols)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    /* y *= beta */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT j = 0; j < A->rows * A->block_size; j++)
    {
        alpha_mul(y[j], y[j], beta);
    }

    /* split block rows across threads, balanced by nnz per block row */
    ALPHA_INT partition[thread_num + 1];
    balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);

    if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
    {
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
        {
            ALPHA_INT tid = alpha_get_thread_id();
            for (ALPHA_INT br = partition[tid]; br < partition[tid + 1]; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                /* first block with column index >= br: skips strictly-lower blocks */
                ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = upper_start; ai < block_end; ai++)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    ALPHA_Number temp_orig;
                    if (bc == br)
                    {
                        /* diagonal block: only its upper triangle contributes */
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            for (ALPHA_INT b_col = b_row; b_col < bs; b_col++)
                            {
                                alpha_mul(temp_orig, alpha, A->values[a0_idx + b_row * bs + b_col]);
                                alpha_madde(y[b_row + row], temp_orig, x[col + b_col]);
                            }
                        }
                    }
                    else
                    {
                        /* strictly-upper block: use every entry */
                        for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                        {
                            for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                            {
                                alpha_mul(temp_orig, alpha, A->values[a0_idx + b_row * bs + b_col]);
                                alpha_madde(y[b_row + row], temp_orig, x[col + b_col]);
                            }
                        }
                    }
                }
            }
        }
    }
    else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
    {
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
        {
            ALPHA_INT tid = alpha_get_thread_id();
            for (ALPHA_INT br = partition[tid]; br < partition[tid + 1]; br++)
            {
                ALPHA_INT row = br * bs;
                ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
                /* first block with column index >= br: skips strictly-lower blocks */
                ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
                for (ALPHA_INT ai = upper_start; ai < block_end; ++ai)
                {
                    ALPHA_INT bc = A->col_indx[ai];
                    ALPHA_INT col = bc * bs;
                    ALPHA_INT a0_idx = ai * bs2;
                    ALPHA_Number temp_orig;
                    if (bc == br)
                    {
                        /* diagonal block: only its upper triangle contributes */
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = 0; b_row <= b_col; b_row++)
                            {
                                alpha_mul(temp_orig, alpha, A->values[a0_idx + b_col * bs + b_row]);
                                alpha_madde(y[b_row + row], temp_orig, x[col + b_col]);
                            }
                        }
                    }
                    else
                    {
                        /* strictly-upper block: use every entry */
                        for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
                        {
                            for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
                            {
                                alpha_mul(temp_orig, alpha, A->values[a0_idx + b_col * bs + b_row]);
                                alpha_madde(y[b_row + row], temp_orig, x[col + b_col]);
                            }
                        }
                    }
                }
            }
        }
    }
    else
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
gemm_symm_int8.h | // chgemm is pleased to support the open source community by supporting ncnn available.
//
// author:tpoisonooo (https://github.com/tpoisonooo/chgemm) implement symmetric int8 GEMM on aarch64.
//
// Copyright (C) 2019 tpoisonooo. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#pragma once
#if __aarch64__
/*
 * Greedy chunk decomposition of k for the micro-kernels: k8 full chunks of
 * 8, then at most one chunk each of 4, 2 and 1. k8_even is 1 when k8 is
 * odd and 0 when it is even (despite the name). k itself is restored.
 */
#define DECOMPOSE_K \
    int ktmp = k; \
    int k8 = k >> 3; \
    int k8_even = (k8 % 2 == 0) ? 0 : 1; \
    k -= (k8 << 3); \
    int k4 = k >> 2; \
    k -= (k4 << 2); \
    int k2 = k >> 1; \
    k -= (k2 << 1); \
    int k1 = k; \
    k = ktmp;

/* Same greedy decomposition of n into chunks of 4, 2 and 1; n restored. */
#define DECOMPOSE_N \
    int ntmp = n; \
    int n4 = n >> 2; \
    n -= (n4 << 2); \
    int n2 = n >> 1; \
    n -= (n2 << 1); \
    int n1 = n; \
    n = ntmp;
#define PRINT_MATRIX 0
#if PRINT_MATRIX
/* Debug: print an m x k int8 matrix stored with row stride ldx. */
static void print_int8_matrix(char* name, const int8_t* a, int m, int k, int ldx)
{
    fprintf(stdout, "------------- %s \n", name);
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            fprintf(stdout, "%d \t", a[i * ldx + j]);
        }
        fprintf(stdout, "\n\n");
    }
}
/* Debug: print an m x k int32 matrix stored with row stride ldx. */
static void print_int32_matrix(char* name, const int32_t* a, int m, int k, int ldx)
{
    fprintf(stdout, "------------- %s \n", name);
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            fprintf(stdout, "%d \t", a[i * ldx + j]);
        }
        fprintf(stdout, "\n\n");
    }
}
/* Debug: print a float vector of the given length. */
static void print_fp32_vec(char* name, const float* a, int len)
{
    fprintf(stdout, "------------- %s \n", name);
    for (int i = 0; i < len; ++i)
    {
        fprintf(stdout, "%f \t", a[i]);
    }
    fprintf(stdout, "\n\n");
}
#endif
/*
 * Pack matrix B (k x n, row stride ldx) into the panel layout consumed by
 * the int8 kernels. Columns are taken in greedy groups of 4/2/1; within a
 * column group, rows are consumed in greedy chunks of 8/4/2/1, and each
 * chunk is emitted column-by-column (all chunk rows of one column, then
 * the next column). This reproduces the fully unrolled original exactly.
 */
static void reorder_b(const int8_t* b, int8_t* sb, const int k, const int n, const int ldx)
{
#if PRINT_MATRIX
    print_int8_matrix("b", b, k, n, ldx);
    int8_t* origin = sb;
#endif
    int i = 0;
    while (i < n)
    {
        /* width of this column group: 4 while possible, then 2, then 1 */
        const int w = (n - i >= 4) ? 4 : ((n - i >= 2) ? 2 : 1);
        int j = 0;
        while (j < k)
        {
            /* depth of this row chunk: 8, then one 4, 2, 1 for the tail */
            const int d = (k - j >= 8) ? 8
                        : ((k - j >= 4) ? 4 : ((k - j >= 2) ? 2 : 1));
            for (int c = 0; c < w; ++c)
            {
                for (int r = 0; r < d; ++r)
                {
                    *sb++ = b[(j + r) * ldx + (i + c)];
                }
            }
            j += d;
        }
        i += w;
    }
#if PRINT_MATRIX
    print_int8_matrix("sb", origin, k, n, n);
#endif
}
/*
 * Pack matrix A (m x k, row stride ldx) into the row-panel layout consumed
 * by the int8 kernels: rows are grouped 4/2/1, and inside a group the
 * columns are interleaved in chunks of 8/4/2/1 using NEON ld1/st1/trn1
 * (this header is only compiled under __aarch64__).
 */
static void reorder_a(int8_t* a, int8_t* sa, int m, const int k, const int ldx)
{
#if PRINT_MATRIX
    print_int8_matrix("a", a, m, k, ldx);
    int8_t* origin = sa;
#endif
    int i = 0;
    for (; i + 3 < m; i += 4)
    {
        int8_t* p0 = a;
        int8_t* p1 = a + ldx;
        int8_t* p2 = a + 2 * ldx;
        int8_t* p3 = a + 3 * ldx;
        int j = 0;
        for (; j + 7 < k; j += 8)
        {
            /* 8 columns from each of the 4 rows, stored row-interleaved */
            asm volatile(
                "ld1 {v0.8b}, [%0], #8 \n"
                "ld1 {v1.8b}, [%1], #8 \n"
                "ld1 {v2.8b}, [%2], #8 \n"
                "ld1 {v3.8b}, [%3], #8 \n"
                "st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32\n"
                : "=r"(p0),
                "=r"(p1),
                "=r"(p2),
                "=r"(p3),
                "=r"(sa)
                : "0"(p0),
                "1"(p1),
                "2"(p2),
                "3"(p3),
                "4"(sa)
                : "cc", "memory", "v0", "v1", "v2", "v3");
        }
        if (j + 3 < k)
        {
            j += 4;
            /* NOTE(review): ld1 {v.8b} reads 8 bytes per row but only 4 are
             * consumed (pointers advance by 4) — assumes at least 8 readable
             * bytes remain at this point of each row; confirm for tightly
             * sized allocations. */
            asm volatile(
                "ld1 {v0.8b}, [%0] \n"
                "add %0, %0, #4 \n"
                "ld1 {v1.8b}, [%1] \n"
                "add %1, %1, #4 \n"
                "ld1 {v2.8b}, [%2] \n"
                "add %2, %2, #4 \n"
                "ld1 {v3.8b}, [%3] \n"
                "add %3, %3, #4 \n"
                "trn1 v0.2s, v0.2s, v1.2s \n"
                "st1 {v0.8b}, [%4], #8 \n"
                "trn1 v2.2s, v2.2s, v3.2s \n"
                "st1 {v2.8b}, [%4], #8 \n"
                : "=r"(p0),
                "=r"(p1),
                "=r"(p2),
                "=r"(p3),
                "=r"(sa)
                : "0"(p0),
                "1"(p1),
                "2"(p2),
                "3"(p3),
                "4"(sa)
                : "cc", "memory", "v0", "v1", "v2", "v3");
        }
        if (j + 1 < k)
        {
            j += 2;
            /* same over-read caveat as above (8-byte loads, 2 consumed) */
            asm volatile(
                "ld1 {v0.8b}, [%0] \n"
                "add %0, %0, #2 \n"
                "ld1 {v1.8b}, [%1] \n"
                "add %1, %1, #2 \n"
                "ld1 {v2.8b}, [%2] \n"
                "add %2, %2, #2 \n"
                "ld1 {v3.8b}, [%3] \n"
                "add %3, %3, #2 \n"
                "trn1 v0.4h, v0.4h, v1.4h \n"
                "trn1 v2.4h, v2.4h, v3.4h \n"
                "trn1 v0.2s, v0.2s, v2.2s \n"
                "st1 {v0.8b}, [%4], #8 \n"
                : "=r"(p0),
                "=r"(p1),
                "=r"(p2),
                "=r"(p3),
                "=r"(sa)
                : "0"(p0),
                "1"(p1),
                "2"(p2),
                "3"(p3),
                "4"(sa)
                : "cc", "memory", "v0", "v1", "v2", "v3");
        }
        if (j < k)
        {
            /* final single column of the 4-row group */
            *sa++ = *p0;
            *sa++ = *p1;
            *sa++ = *p2;
            *sa++ = *p3;
        }
        a += 4 * ldx;
    }
    if (i + 1 < m)
    {
        i += 2;
        int8_t* p0 = a;
        int8_t* p1 = a + ldx;
        int j = 0;
        for (; j + 7 < k; j += 8)
        {
            asm volatile(
                "ld1 {v0.8b}, [%0], #8 \n"
                "ld1 {v1.8b}, [%1], #8 \n"
                "st1 {v0.8b, v1.8b}, [%2], #16\n"
                : "=r"(p0),
                "=r"(p1),
                "=r"(sa)
                : "0"(p0),
                "1"(p1),
                "2"(sa)
                : "cc", "memory", "v0", "v1");
        }
        if (j + 3 < k)
        {
            j += 4;
            /* same over-read caveat as the 4-row tail above */
            asm volatile(
                "ld1 {v0.8b}, [%0] \n"
                "add %0, %0, #4 \n"
                "ld1 {v1.8b}, [%1] \n"
                "add %1, %1, #4 \n"
                "trn1 v0.2s, v0.2s, v1.2s \n"
                "st1 {v0.8b}, [%2], #8 \n"
                : "=r"(p0),
                "=r"(p1),
                "=r"(sa)
                : "0"(p0),
                "1"(p1),
                "2"(sa)
                : "cc", "memory", "v0", "v1");
        }
        if (j + 1 < k)
        {
            j += 2;
            sa[0] = p0[0];
            sa[1] = p0[1];
            sa[2] = p1[0];
            sa[3] = p1[1];
            sa += 4;
            p0 += 2;
            p1 += 2;
        }
        if (j < k)
        {
            sa[0] = p0[0];
            sa[1] = p1[0];
            sa += 2;
        }
        a += 2 * ldx;
    }
    if (i < m)
    {
        /* NOTE(review): copies ldx bytes, not k — correct only when
         * ldx == k for this final single row; confirm all callers. */
        memcpy(sa, a, sizeof(int8_t) * ldx);
    }
#if PRINT_MATRIX
    print_int8_matrix("sa", origin, m, k, k);
#endif
}
// int8 GEMM micro-kernel for a single output row (M = 1):
//   C[0, 0..n) += sum over k of A[0, k] * B[k, 0..n)
// sa: packed 1xK panel of A (int8); sb: packed KxN panel of B (int8).
// The two unnamed int parameters (m and ldc in the sibling kernels) are
// unused here since there is only one output row.
// Output mode depends on `scales`:
//   scales != 0: requantize -- int32 accumulators are converted to fp32,
//                multiplied by scales[0], optionally biased by bias[0],
//                then saturating-narrowed back to int8 (fcvtas/sqxtn).
//   scales == 0: raw int32 sums are stored to dst.
// N is processed in 4/2/1-column tiles (n4/n2/n1) and K in 8/4/2/1-element
// chunks (k8/k4/k2/k1), all produced by the DECOMPOSE_* macros below.
static void int8kernel_m1(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int, float* scales, float* bias)
{
// Output cursor: advanced by 4 bytes (int8 mode) or 16 bytes (int32 mode)
// per 4-column tile inside the asm.
void* pc = dst;
int8_t* pa = sa;
int8_t* pb = sb;
// NOTE(review): DECOMPOSE_K / DECOMPOSE_N are assumed to declare the
// k8/k8_even/k4/k2/k1 and n4/n2/n1 tiling counters referenced below --
// macro bodies are not visible in this chunk; confirm against their
// definitions.
DECOMPOSE_K
DECOMPOSE_N
// 4-column tiles: v8..v11 accumulate the 4 output columns, reduced into
// v8 by the addp tree at label 3. Outer label 9 loops once per tile.
if (n4 > 0)
{
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" mov x8, %0 // PanelA\n"
" cmp %w4, #0 \n"
" beq 1f \n"
" mov w19, %w4 \n"
" cmp %w3, #0 \n"
" beq 2f// loop number is even \n"
" // start loopm1_kd8_nd4\n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n"
" ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" saddlp v9.4s, v0.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" saddlp v10.4s, v0.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" saddlp v11.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n"
" ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [%1], #32\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v2.8b, v4.8b \n"
" smlal v0.8h, v3.8b, v12.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v2.8b, v5.8b \n"
" smlal v1.8h, v3.8b, v13.8b \n"
" sadalp v9.4s, v1.8h \n"
" smull v0.8h, v2.8b, v6.8b \n"
" smlal v0.8h, v3.8b, v14.8b \n"
" sadalp v10.4s, v0.8h \n"
" smull v1.8h, v2.8b, v7.8b \n"
" smlal v1.8h, v3.8b, v15.8b \n"
" sadalp v11.4s, v1.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w5, #0 \n"
" beq 4f \n"
" // start subkernel_m1n4k4 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" sxtl v5.8h, v5.8b \n"
" mov v6.d[0], v4.d[1] \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" add v8.4s, v8.4s, v12.4s \n"
" 4: \n"
" cmp %w6, #0 \n"
" beq 5f \n"
" // start subkernel_m1n4k2\n"
" ld1 {v4.8b}, [%0] // load A1x2 \n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" mov v4.h[1], v4.h[0] \n"
" mov v4.s[1], v4.s[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" sadalp v8.4s, v0.8h \n"
" 5: \n"
" cmp %w7, #0 \n"
" beq 6f \n"
" // start subkernel_m1n4k1 \n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A1x1\n"
" add %0, %0, #1 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" ldr w24, [%9] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" mov v12.s[0], w24 \n"
" fmul v8.4s, v8.4s, v12.s[0]\n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" dup v15.4s, w24 \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2]\n"
" add %2, %2, #4 \n"
" b 10f\n"
" 7: \n"
" st1 {v8.4s}, [%2], #16 \n"
" 10: \n"
" subs %w8, %w8, #1 \n"
" mov %0, x8 \n"
" bne 9b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even), // %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
// 2-column tail tile: v8/v9 accumulate the two columns; runs at most once
// (no outer column loop here).
if (n2 > 0)
{
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" mov x8, %0 // PanelA\n"
" cmp %w4, #0 \n"
" beq 1f // k <= 7\n"
" mov w19, %w4\n"
" cmp %w3, #0 \n"
" beq 2f // loop number is even \n"
" // start loopmd1_kd8_nd2 \n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" saddlp v9.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v2.8b, v4.8b \n"
" smlal v0.8h, v3.8b, v6.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v2.8b, v5.8b \n"
" smlal v1.8h, v3.8b, v7.8b \n"
" sadalp v9.4s, v1.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" // start process kd4 kd2 kd1 cases \n"
" 1: \n"
" cmp %w5, 0 \n"
" beq 4f \n"
" // start subkernel_m1n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" 4: \n"
" cmp %w6, 0 \n"
" beq 5f \n"
" // start subkernel_m1n2k2 \n"
" ld1 {v4.8b}, [%0] // load A1x2\n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1] // load B2x2\n"
" add %1, %1, #4 \n"
" mov v4.h[1], v4.h[0] \n"
" smull v0.8h, v4.8b, v0.8b \n"
" saddlp v0.4s, v0.8h \n"
" add v8.4s, v8.4s, v0.4s \n"
" 5: \n"
" cmp %w7, 0 \n"
" beq 6f \n"
" // start subkernel_m1n2k1 \n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A1x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" // v12: s0 s1 \n"
" ldr w24, [%9] \n"
" mov v12.s[0], w24 \n"
" mov v12.s[1], v12.s[0] \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" mov v12.s[0], w24 \n"
" mov v12.s[1], v12.s[0] \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8:\n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2]\n"
" add %2, %2, #2 \n"
" b 10f\n"
" 7: \n"
" st1 {v8.2s}, [%2], #8 \n"
" 10: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even), // %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
// 1-column tail tile: scalar dot product accumulated in v8.s[0].
if (n1 > 0)
{
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" cmp %w4, #0 \n"
" beq 1f // k <= 7 \n"
" mov w19, %w4\n"
" cmp %w3, #0 \n"
" beq 2f // loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load B line \n"
" ld1 {v2.8b}, [%0], #8 // load A line \n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v25.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w5, 0 \n"
" beq 4f \n"
" // start subkernel_m1n1k4 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %1, %1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" 4: \n"
" cmp %w6, 0 \n"
" beq 5f \n"
" // start subkernel_m1n1k2 \n"
" ld1 {v4.8b}, [%0] // load A1x2\n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1] // load B2x1\n"
" add %1, %1, #2 \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" add v8.4s, v8.4s, v0.4s \n"
" 5: \n"
" cmp %w7, 0 \n"
" beq 6f \n"
" // start subkernel_m1n1k1 \n"
" ld1 {v0.8b}, [%1] // load B1x1 \n"
" add %1, %1, #1 \n"
" ld1 {v1.8b}, [%0] // load A1x1 \n"
" add %0, %0, #1 \n"
" sxtl v1.8h, v1.8b \n"
" sxtl v0.8h, v0.8b \n"
" smull v0.4s, v1.4h, v0.h[0] \n"
" add v8.4s, v8.4s, v0.4s \n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm\n"
" ldr w24, [%9] \n"
" mov v12.s[0], w24 \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" mov v12.s[0], w24 \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2]\n"
" b 10f \n"
" 7: \n"
" st1 {v8.s}[0], [%2] \n"
" 10: \n"
// NOTE(review): the write below looks like a leftover scratch
// instruction (x0 is declared clobbered); verify it is intentional.
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even), // %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x0", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
}
// int8 GEMM micro-kernel for two output rows (M = 2):
//   C[r, 0..n) += sum over k of A[r, k] * B[k, 0..n), r in {0, 1}
// sa: packed 2xK panel of A (int8, rows interleaved); sb: packed KxN panel
// of B (int8). ldc is the output row pitch in ELEMENTS -- the element size
// (and hence the byte pitch) depends on the output mode below.
// Output mode depends on `scales`:
//   scales != 0: requantize to int8 per row using scales[0]/scales[1]
//                (and bias[0]/bias[1] when bias != 0).
//   scales == 0: store raw int32 sums.
// N is processed in 4/2/1-column tiles (n4/n2/n1) and K in 8/4/2/1-element
// chunks (k8/k4/k2/k1).
static void int8kernel_m2(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias)
{
// pc0/pc1: per-row output cursors; ldc is scaled by the element size of
// the selected output mode (int32 vs int8).
void *pc0, *pc1;
if (scales == 0)
{
pc0 = (int32_t*)dst;
pc1 = ((int32_t*)pc0) + ldc;
}
else
{
pc0 = dst;
pc1 = ((int8_t*)pc0) + ldc;
}
int8_t* pa = sa;
int8_t* pb = sb;
// NOTE(review): DECOMPOSE_K / DECOMPOSE_N are assumed to declare the
// k8/k8_even/k4/k2/k1 and n4/n2/n1 counters used below -- confirm against
// the macro definitions, which are not visible in this chunk.
DECOMPOSE_K
DECOMPOSE_N
// 4-column tiles: v8..v11 accumulate row 0, v12..v15 row 1 (plus
// v16..v23 scratch); label 9 loops once per 4-column tile.
if (n4 > 0)
{
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b \n"
" eor v11.16b, v11.16b, v11.16b \n"
" eor v12.16b, v12.16b, v12.16b \n"
" eor v13.16b, v13.16b, v13.16b \n"
" eor v14.16b, v14.16b, v14.16b \n"
" eor v15.16b, v15.16b, v15.16b \n"
" eor v16.16b, v16.16b, v16.16b \n"
" eor v17.16b, v17.16b, v17.16b \n"
" eor v18.16b, v18.16b, v18.16b \n"
" eor v19.16b, v19.16b, v19.16b \n"
" eor v20.16b, v20.16b, v20.16b \n"
" eor v21.16b, v21.16b, v21.16b \n"
" eor v22.16b, v22.16b, v22.16b \n"
" eor v23.16b, v23.16b, v23.16b \n"
" mov x8, %0 // PanelA \n"
" cmp %w5, #0 \n"
" beq 1f \n"
" mov w17, %w5 \n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopm2_kd8_nd4\n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v10.4s, v0.8h \n"
" saddlp v14.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v11.4s, v0.8h \n"
" saddlp v15.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" add x12, %1, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x12], #16 \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h\n"
" sadalp v13.4s, v1.8h\n"
// NOTE(review): the in-asm comment below marks this half of the k8
// loop as suspect ("error here!") -- verify this path against a
// reference GEMM before relying on it.
" // start v10v11, v14v15, v18v19, v22v23, error here!\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x12], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v10.4s, v0.8h \n"
" sadalp v11.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v14.4s, v0.8h \n"
" sadalp v15.4s, v1.8h \n"
" add %1, %1, #32 \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" addp v9.4s, v12.4s, v14.4s \n"
" // start process kd4 kd2 kd1 cases \n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n4k4 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" sxtl v5.8h, v5.8b \n"
" mov v6.d[0], v4.d[1] \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" add v8.4s, v8.4s, v12.4s \n"
" smull v16.4s, v3.4h, v4.4h \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v9.4s, v9.4s, v16.4s \n"
" 4: \n"
" cmp %w7, #0 \n"
" beq 5f \n"
" // start subkernel_m2n4k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2 \n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v12.8h, v4.8b, v0.8b \n"
" smull v13.8h, v4.8b, v1.8b \n"
" smull v14.8h, v4.8b, v2.8b \n"
" smull v15.8h, v4.8b, v3.8b \n"
" saddlp v12.4s, v12.8h \n"
" saddlp v13.4s, v13.8h \n"
" saddlp v14.4s, v14.8h \n"
" saddlp v15.4s, v15.8h \n"
" mov v16.s[0], v12.s[0] \n"
" mov v16.s[1], v13.s[0] \n"
" mov v16.s[2], v14.s[0] \n"
" mov v16.s[3], v15.s[0] \n"
" mov v17.s[0], v13.s[1] \n"
" mov v17.s[1], v12.s[1] \n"
" mov v17.s[2], v15.s[1] \n"
" mov v17.s[3], v14.s[1] \n"
" add v8.4s, v8.4s, v16.4s \n"
" add v9.4s, v9.4s, v17.4s \n"
" 5: \n"
" cmp %w8, #0 \n"
" beq 6f \n"
" // start subkernel_m2n4k1 \n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A2x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" smlal v9.4s, v4.4h, v2.h[1]\n"
" 6: \n"
" cmp %10, #0 \n"
" beq 7f \n"
" ld1 {v12.2s}, [%10] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v9.4s, v9.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.s[0]\n"
" fmul v9.4s, v9.4s, v12.s[1]\n"
" cmp %11, #0 \n"
" beq 8f \n"
" // fp32 += scales_tm \n"
" ld1 {v14.2s}, [%11] \n"
" dup v15.4s, v14.s[0] \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" dup v15.4s, v14.s[1] \n"
" fadd v9.4s, v9.4s, v15.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s\n"
" fcvtas v9.4s, v9.4s\n"
" // int32 -> int16 \n"
" sqxtn v6.4h, v8.4s \n"
" sqxtn2 v6.8h, v9.4s\n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v6.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2] \n"
" add %2, %2, #4 \n"
" st1 {v8.s}[1], [%3] \n"
" add %3, %3, #4 \n"
" b 10f \n"
" 7: \n"
" st1 {v8.4s}, [%2], #16 \n"
" st1 {v9.4s}, [%3], #16 \n"
" 10: \n"
" subs %w9, %w9, #1 \n"
" mov %0, x8 \n"
" bne 9b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even), // %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(n4), // %9
"=r"(scales), // %10
"=r"(bias) // %11
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(n4),
"10"(scales),
"11"(bias)
: "cc", "memory", "x8", "w17", "x12", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
// 2-column tail tile: v8 accumulates row 0, v12 row 1; runs at most once.
if (n2 > 0)
{
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"9: \n"
" mov x8, %0 // PanelA \n"
" cmp %w5, #0 \n"
" beq 1f \n"
" mov w17, %w5 \n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopmd2_kd8_nd2 \n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [%1], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h \n"
" sadalp v13.4s, v1.8h \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load first A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v3.4h, v4.4h \n"
" smull v14.4s, v3.4h, v6.4h \n"
" addp v13.4s, v13.4s, v14.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" 4: \n"
" cmp %w7, 0 \n"
" beq 5f \n"
" // start subkernel_m2n2k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2\n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1] // load B2x2\n"
" add %1, %1, #4 \n"
" // 00 11\n"
" rev32 v1.4h, v0.4h // 11 00\n"
" smull v21.8h, v4.8b, v0.8b \n"
" smull v22.8h, v4.8b, v1.8b \n"
" saddlp v21.4s, v21.8h \n"
" saddlp v22.4s, v22.8h \n"
" mov v9.s[0], v21.s[0] \n"
" mov v9.s[1], v22.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v22.s[1] \n"
" mov v13.s[1], v21.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" 5: \n"
" cmp %w8, #0 \n"
" beq 6f \n"
" // start subkernel_m2n2k1 \n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" smlal v12.4s, v4.4h, v2.h[1] \n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" mov v8.d[1], v12.d[0] \n"
" // v12: 0 1 \n"
" ld1 {v12.2s}, [%9] \n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" // v12: 0 0 1 1 \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.2s}, [%10] \n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" fadd v8.4s, v8.4s, v12.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2] \n"
" add %2, %2, #2 \n"
" st1 {v8.h}[1], [%3] \n"
" add %3, %3, #2 \n"
" b 10f \n"
// NOTE(review): the "7:" literal below is missing its trailing
// "\n"; string concatenation fuses the label and the following st1
// onto one assembler line. GNU as accepts label-then-instruction on
// a single line, but this is fragile -- confirm intended.
" 7:"
" st1 {v8.2s}, [%2], #8 \n"
" st1 {v12.2s}, [%3], #8 \n"
" 10: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even), // %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "x12", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
// 1-column tail tile: scalar dot product per row (v8 row 0, v12 row 1).
if (n1 > 0)
{
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"9: \n"
" cmp %w5, #0 \n"
" beq 1f // k <=7\n"
" mov w17, %w5\n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v26.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v25.8b, v4.8b \n"
" smlal v1.8h, v27.8b, v5.8b \n"
" sadalp v12.4s, v1.8h \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v12.4s, v12.4s, v12.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n1k2 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %1, %1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
" ld1 {v2.8b}, [%0], #8 // load A2x4 \n"
" sxtl v2.8h, v2.8b \n"
" mov v5.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v5.4h, v4.4h \n"
" addp v13.4s, v13.4s, v13.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" 4: \n"
" cmp %w7, 0 \n"
" beq 5f \n"
" // start subkernel_m2n1k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2\n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1] // load B2x1\n"
" add %1, %1, #2 \n"
" mov v0.h[1], v0.h[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" mov v9.s[0], v0.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" 5: \n"
" cmp %w8, 0 \n"
" beq 6f \n"
" // start subkernel_m2n1k1 \n"
" ld1 {v0.8b}, [%1] // load B1x1\n"
" add %1, %1, #1 \n"
" ld1 {v1.8b}, [%0] // load A2x1\n"
" add %0, %0, #2 \n"
" sxtl v1.8h, v1.8b \n"
" sxtl v0.8h, v0.8b \n"
" smull v0.4s, v1.4h, v0.h[0]\n"
" mov v1.s[0], v0.s[1] \n"
" add v8.4s, v8.4s, v0.4s \n"
" add v12.4s, v12.4s, v1.4s \n"
" 6: \n"
// NOTE(review): this compares only the LOW 32 BITS of the scales
// pointer (%w9); sibling kernels compare the full 64-bit register
// ("cmp %9, #0"). A heap pointer with a zero low word would be
// misclassified as null here -- verify this is intended.
" cmp %w9, #0 \n"
" beq 7f \n"
" mov v8.s[1], v12.s[0] \n"
" // v12: s0 s1 \n"
" ld1 {v12.2s}, [%9] \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.2s}, [%10] \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2] \n"
" st1 {v8.b}[1], [%3] \n"
" b 10f \n"
" 7: \n"
" st1 {v8.s}[0], [%2] \n"
" st1 {v12.s}[0], [%3] \n"
" 10: \n"
// NOTE(review): looks like a leftover scratch write (x0 is declared
// clobbered); verify it is intentional.
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even), // %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(scales),
"10"(bias)
: "cc", "memory", "x0", "x8", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
}
static void int8kernel_m4(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias)
{
void *pc0, *pc1, *pc2, *pc3;
if (scales == 0)
{
pc0 = (int32_t*)dst;
pc1 = ((int32_t*)pc0) + ldc;
pc2 = ((int32_t*)pc1) + ldc;
pc3 = ((int32_t*)pc2) + ldc;
}
else
{
pc0 = dst;
pc1 = ((int8_t*)pc0) + ldc;
pc2 = ((int8_t*)pc1) + ldc;
pc3 = ((int8_t*)pc2) + ldc;
}
int8_t* pa = sa;
int8_t* pb = sb;
DECOMPOSE_K
DECOMPOSE_N
if (n4 > 0)
{
asm volatile(
"8: \n"
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
" mov x8, %0 \n"
" cmp %w7, #0 \n"
" beq 1f \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 2f \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v10.4s, v0.8h \n"
" saddlp v14.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v11.4s, v0.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" saddlp v15.4s, v1.8h \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v17.4s, v0.8h \n"
" saddlp v21.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v18.4s, v0.8h \n"
" saddlp v22.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v19.4s, v0.8h \n"
" saddlp v23.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 3f \n"
" 2: \n"
" add x15, %x1, #32 \n"
" add x14, %x0, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16\n"
" ld1 {v2.8b, v3.8b}, [%0], #16\n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v24.8b\n"
" smlal v1.8h, v7.8b, v24.8b\n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h\n"
" sadalp v13.4s, v1.8h\n"
" // finish v8v9 v12v13, start proc v16v17,v20v21\n"
" ld1 {v28.8b, v29.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v28.8b \n"
" smull v1.8h, v5.8b, v28.8b \n"
" ld1 {v26.8b, v27.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v26.8b \n"
" smlal v1.8h, v7.8b, v26.8b \n"
" sadalp v16.4s, v0.8h \n"
" sadalp v17.4s, v1.8h \n"
" smull v0.8h, v4.8b, v29.8b \n"
" smull v1.8h, v5.8b, v29.8b \n"
" smlal v0.8h, v6.8b, v27.8b \n"
" smlal v1.8h, v7.8b, v27.8b \n"
" sadalp v20.4s, v0.8h \n"
" sadalp v21.4s, v1.8h \n"
" // start v10v11, v14v15, v18v19, v22v23\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v10.4s, v0.8h \n"
" sadalp v11.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v14.4s, v0.8h \n"
" sadalp v15.4s, v1.8h \n"
" smull v0.8h, v4.8b, v28.8b \n"
" smull v1.8h, v5.8b, v28.8b \n"
" smlal v0.8h, v6.8b, v26.8b \n"
" smlal v1.8h, v7.8b, v26.8b \n"
" sadalp v18.4s, v0.8h \n"
" sadalp v19.4s, v1.8h \n"
" smull v0.8h, v4.8b, v29.8b \n"
" smull v1.8h, v5.8b, v29.8b \n"
" smlal v0.8h, v6.8b, v27.8b \n"
" smlal v1.8h, v7.8b, v27.8b \n"
" sadalp v22.4s, v0.8h \n"
" sadalp v23.4s, v1.8h \n"
" add %0, %0, #32 \n"
" add %1, %1, #32 \n"
" subs w20, w20, #2 \n"
" bne 2b \n"
// start nd2
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v20.4s, v20.4s, v21.4s\n"
" addp v22.4s, v22.4s, v23.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" addp v9.4s, v12.4s, v14.4s \n"
" addp v10.4s, v16.4s, v18.4s\n"
" addp v11.4s, v20.4s, v22.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w8, #0 \n"
" beq 4f \n"
" // start subkernel_m4n4k4\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" sxtl v5.8h, v5.8b \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" smull v16.4s, v3.4h, v4.4h \n"
" add v8.4s, v8.4s, v12.4s \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v9.4s, v9.4s, v16.4s \n"
" ld1 {v2.8b}, [%0], #8 // load next A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" smull v16.4s, v3.4h, v4.4h \n"
" add v10.4s, v10.4s, v12.4s \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v11.4s, v11.4s, v16.4s \n"
" 4: \n"
" cmp %w9, #0 \n"
" beq 5f \n"
" // start subkernel_m4n4k2 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" ld1 {v4.8b}, [%0], #8 // load A4x2 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v12.8h, v4.8b, v0.8b \n"
" smull v13.8h, v4.8b, v1.8b \n"
" saddlp v12.4s, v12.8h \n"
" smull v14.8h, v4.8b, v2.8b \n"
" saddlp v13.4s, v13.8h \n"
" smull v15.8h, v4.8b, v3.8b \n"
" saddlp v14.4s, v14.8h \n"
" saddlp v15.4s, v15.8h \n"
" mov v16.s[0], v12.s[0] \n"
" mov v16.s[1], v13.s[0] \n"
" mov v16.s[2], v14.s[0] \n"
" mov v16.s[3], v15.s[0] \n"
" mov v17.s[0], v13.s[1] \n"
" mov v17.s[1], v12.s[1] \n"
" mov v17.s[2], v15.s[1] \n"
" mov v17.s[3], v14.s[1] \n"
" mov v18.s[0], v14.s[2] \n"
" mov v18.s[1], v15.s[2] \n"
" mov v18.s[2], v12.s[2] \n"
" mov v18.s[3], v13.s[2] \n"
" mov v19.s[0], v15.s[3] \n"
" mov v19.s[1], v14.s[3] \n"
" mov v19.s[2], v13.s[3] \n"
" mov v19.s[3], v12.s[3] \n"
" add v8.4s, v8.4s, v16.4s \n"
" add v9.4s, v9.4s, v17.4s \n"
" add v10.4s, v10.4s, v18.4s \n"
" add v11.4s, v11.4s, v19.4s \n"
" 5: \n"
" cmp %w10, #0 \n"
" beq 6f \n"
" // start subkernel_m4n4k1\n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0] \n"
" smlal v9.4s, v4.4h, v2.h[1] \n"
" smlal v10.4s, v4.4h, v2.h[2] \n"
" smlal v11.4s, v4.4h, v2.h[3] \n"
" 6: \n"
" cmp %12, #0 \n"
" beq 9f \n"
" ld1 {v12.4s}, [%12] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v9.4s, v9.4s \n"
" scvtf v10.4s, v10.4s \n"
" scvtf v11.4s, v11.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.s[0] \n"
" fmul v9.4s, v9.4s, v12.s[1] \n"
" fmul v10.4s, v10.4s, v12.s[2] \n"
" fmul v11.4s, v11.4s, v12.s[3] \n"
" cmp %13, #0 \n"
" beq 7f \n"
" ld1 {v14.4s}, [%13] \n"
" dup v15.4s, v14.s[0] \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" dup v15.4s, v14.s[1] \n"
" fadd v9.4s, v9.4s, v15.4s \n"
" dup v15.4s, v14.s[2] \n"
" fadd v10.4s, v10.4s, v15.4s\n"
" dup v15.4s, v14.s[3] \n"
" fadd v11.4s, v11.4s, v15.4s\n"
" 7: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" fcvtas v9.4s, v9.4s \n"
" fcvtas v10.4s, v10.4s \n"
" fcvtas v11.4s, v11.4s \n"
" // int32 -> int16 \n"
" sqxtn v6.4h, v8.4s \n"
" sqxtn2 v6.8h, v9.4s \n"
" sqxtn v7.4h, v10.4s \n"
" sqxtn2 v7.8h, v11.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v6.8h \n"
" sqxtn v9.8b, v7.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2] \n"
" add %x2, %x2, #4 \n"
" st1 {v8.s}[1], [%3] \n"
" add %x3, %x3, #4 \n"
" st1 {v9.s}[0], [%4] \n"
" add %x4, %x4, #4 \n"
" st1 {v9.s}[1], [%5] \n"
" add %x5, %x5, #4 \n"
" b 10f \n"
" 9: \n"
" st1 {v8.4s}, [%x2], #16 \n"
" st1 {v9.4s}, [%x3], #16 \n"
" st1 {v10.4s}, [%x4], #16 \n"
" st1 {v11.4s}, [%x5], #16 \n"
" 10: \n"
" subs %x11, %x11, #1 \n"
" mov %x0, x8 \n"
" bne 8b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even), // %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(n4), // %11
"=r"(scales), // %12
"=r"(bias) // %13
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(n4),
"12"(scales),
"13"(bias)
: "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
if (n2 > 0)
{
asm volatile(
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
"9: \n"
" mov x8, %x0 // PanelA \n"
" cmp %w7, #0 \n"
" beq 1f // k <= 7 \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 2f// loop number is even \n"
" // start loopkd8_nd2 \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v17.4s, v0.8h \n"
" saddlp v21.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 3f \n"
" 2: \n"
" add x15, %1, #16 \n"
" add x14, %0, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [x14], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h \n"
" sadalp v9.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h \n"
" sadalp v13.4s, v1.8h \n"
" // finish v8v9 v12v13, start proc v16v17,v20v21\n"
" ld1 {v28.8b, v29.8b}, [%0], #16\n"
" smull v0.8h, v4.8b, v28.8b\n"
" smull v1.8h, v5.8b, v28.8b\n"
" ld1 {v26.8b, v27.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v26.8b\n"
" smlal v1.8h, v7.8b, v26.8b\n"
" sadalp v16.4s, v0.8h\n"
" sadalp v17.4s, v1.8h\n"
" smull v0.8h, v4.8b, v29.8b\n"
" smull v1.8h, v5.8b, v29.8b\n"
" smlal v0.8h, v6.8b, v27.8b\n"
" smlal v1.8h, v7.8b, v27.8b\n"
" sadalp v20.4s, v0.8h\n"
" sadalp v21.4s, v1.8h\n"
" add %0, %0, #32 \n"
" add %1, %1, #16 \n"
" subs w20, w20, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v20.4s, v20.4s, v21.4s\n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w8, 0 \n"
" beq 4f \n"
" // start subkernel_m4n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load first A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v3.4h, v4.4h \n"
" smull v14.4s, v3.4h, v6.4h \n"
" addp v13.4s, v13.4s, v14.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" ld1 {v2.8b}, [%0], #8 // load next A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v17.4s, v2.4h, v4.4h \n"
" smull v18.4s, v2.4h, v6.4h \n"
" addp v17.4s, v17.4s, v18.4s\n"
" addp v17.4s, v17.4s, v17.4s\n"
" add v16.4s, v16.4s, v17.4s \n"
" smull v21.4s, v3.4h, v4.4h \n"
" smull v22.4s, v3.4h, v6.4h \n"
" addp v21.4s, v21.4s, v22.4s\n"
" addp v21.4s, v21.4s, v21.4s\n"
" add v20.4s, v20.4s, v21.4s \n"
" 4: \n"
" cmp %w9, 0 \n"
" beq 5f \n"
" // start subkernel_m4n2k2 \n"
" ld1 {v4.8b}, [%0], #8 //load A4x2\n"
" ld1 {v0.8b}, [%1] // load B2x2 \n"
" add %1, %1, #4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v21.8h, v4.8b, v0.8b \n"
" smull v22.8h, v4.8b, v1.8b \n"
" smull v23.8h, v4.8b, v2.8b \n"
" smull v24.8h, v4.8b, v3.8b \n"
" saddlp v21.4s, v21.8h \n"
" saddlp v22.4s, v22.8h \n"
" saddlp v23.4s, v23.8h \n"
" saddlp v24.4s, v24.8h \n"
" mov v9.s[0], v21.s[0] \n"
" mov v9.s[1], v22.s[0] \n"
" add v8.4s, v8.4s, v9.4s\n"
" mov v13.s[0], v22.s[1] \n"
" mov v13.s[1], v21.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v23.s[2] \n"
" mov v17.s[1], v24.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v24.s[3] \n"
" mov v21.s[1], v23.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 5: \n"
" cmp %w10, 0 \n"
" beq 6f \n"
" // start subkernel_m4n2k1\n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0] \n"
" smlal v12.4s, v4.4h, v2.h[1] \n"
" smlal v16.4s, v4.4h, v2.h[2] \n"
" smlal v20.4s, v4.4h, v2.h[3] \n"
" 6: \n"
" cmp %11, #0 \n"
" beq 7f \n"
" mov v8.d[1], v12.d[0] \n"
" mov v16.d[1], v20.d[0] \n"
" // v12: 0 1 2 3 \n"
" ld1 {v12.4s}, [%11] \n"
" zip2 v13.4s, v12.4s, v12.4s \n"
" zip1 v12.4s, v12.4s, v12.4s \n"
" // v12: 0 0 1 1 \n"
" // v13: 2 2 3 3 \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v16.4s, v16.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" fmul v16.4s, v16.4s, v13.4s\n"
" cmp %12, #0 \n"
" beq 8f // skip add scales \n"
" // fp32 += scales_tm \n"
" ld1 {v12.4s}, [%12] \n"
" zip2 v13.4s, v12.4s, v12.4s\n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" fadd v8.4s, v8.4s, v12.4s \n"
" fadd v16.4s, v16.4s, v13.4s\n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" fcvtas v16.4s, v16.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" sqxtn v16.4h, v16.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" sqxtn v16.8b, v16.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2] \n"
" add %2, %2, #2 \n"
" st1 {v8.h}[1], [%3] \n"
" add %3, %3, #2 \n"
" st1 {v16.h}[0], [%4] \n"
" add %4, %4, #2 \n"
" st1 {v16.h}[1], [%5] \n"
" add %5, %5, #2 \n"
" b 10f \n"
" 7: \n"
" st1 {v8.2s}, [%2], #8 \n"
" st1 {v12.2s}, [%3], #8 \n"
" st1 {v16.2s}, [%4], #8 \n"
" st1 {v20.2s}, [%5], #8 \n"
" 10: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even), // %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(scales), // %11
"=r"(bias) // %12
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(scales),
"12"(bias)
: "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
if (n1 > 0)
{
asm volatile(
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
"1: \n"
" cmp %w7, #0 \n"
" beq 10f \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 11f// loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 12f \n"
" 11: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n"
" ld1 {v28.8b, v29.8b, v30.8b, v31.8b}, [%0], #32\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v28.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v25.8b, v4.8b \n"
" smlal v1.8h, v29.8b, v5.8b \n"
" sadalp v12.4s, v1.8h \n"
" smull v0.8h, v26.8b, v4.8b \n"
" smlal v0.8h, v30.8b, v5.8b \n"
" sadalp v16.4s, v0.8h \n"
" smull v1.8h, v27.8b, v4.8b \n"
" smlal v1.8h, v31.8b, v5.8b \n"
" sadalp v20.4s, v1.8h \n"
" subs w20, w20, #2 \n"
" bne 11b \n"
" 12: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 10: \n"
" cmp %w8, #0 \n"
" beq 13f \n"
" // start subkernel_m4n1k2 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %x1, %x1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load A4x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v5.d[0], v2.d[1] \n"
" sxtl v3.8h, v3.8b \n"
" mov v6.d[0], v3.d[1] // extend A4x4 to v2,v5,v3,v6\n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v5.4h, v4.4h \n"
" addp v13.4s, v13.4s, v13.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" smull v17.4s, v3.4h, v4.4h \n"
" addp v17.4s, v17.4s, v17.4s\n"
" addp v17.4s, v17.4s, v17.4s\n"
" add v16.4s, v16.4s, v17.4s \n"
" smull v21.4s, v6.4h, v4.4h \n"
" addp v21.4s, v21.4s, v21.4s\n"
" addp v21.4s, v21.4s, v21.4s\n"
" add v20.4s, v20.4s, v21.4s \n"
" 13: \n"
" cmp %w9, #0 \n"
" beq 14f \n"
" // start subkernel_m4n1k2 \n"
" ld1 {v4.8b}, [%0], #8 // load A4x2 \n"
" ld1 {v0.8b}, [%1] // load B2x1 \n"
" add %1, %1, #2 \n"
" mov v0.h[1], v0.h[0] \n"
" mov v0.s[1], v0.s[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" mov v9.s[0], v0.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v0.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v0.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 14: \n"
" cmp %w10, #0 \n"
" beq 15f \n"
" // start subkernel_m4n1k1 \n"
" ld1 {v4.8b}, [%1] // load B1x1\n"
" add %1, %1, #1 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smull v0.4s, v2.4h, v4.h[0]\n"
" add v8.4s, v8.4s, v0.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v0.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v0.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 15: \n"
// REQUANT
" cmp %11, #0 \n"
" beq 16f \n"
" mov v8.s[1], v12.s[0] \n"
" mov v8.s[2], v16.s[0] \n"
" mov v8.s[3], v20.s[0] \n"
" // v12: s0 s1 s2 s3 \n"
" ld1 {v12.4s}, [%11] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" cmp %12, #0 \n"
" beq 17f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.4s}, [%12] \n"
" fadd v8.4s, v8.4s, v12.4s \n"
" 17: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2] \n"
" st1 {v8.b}[1], [%3] \n"
" st1 {v8.b}[2], [%4] \n"
" st1 {v8.b}[3], [%5] \n"
" b 2f \n"
" // no need to add the last output pointer\n"
" 16: \n"
" st1 {v8.s}[0], [%2] \n"
" st1 {v12.s}[0], [%3] \n"
" st1 {v16.s}[0], [%4] \n"
" st1 {v20.s}[0], [%5] \n"
" 2: \n"
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even), // %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(scales), // %11
"=r"(bias) // %12
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(scales),
"12"(bias)
: "cc", "memory", "x0", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
}
#undef DECOMPOSE_K
#undef DECOMPOSE_N
// Top-level int8 GEMM dispatcher: multiplies the packed A panel (sa, m x k)
// with the packed B panel (sb, k x n) and writes an m x n result with row
// stride ldc.  When `scales` is null the raw int32 accumulators are stored;
// otherwise each row is requantized to int8 with its per-row scale (and an
// optional per-row bias).  Rows are processed four at a time (in parallel),
// with the 2-row / 1-row kernels mopping up the 0..3 leftover rows.
static void int8kernel(void* dst, const int8_t* sa, const int8_t* sb, int m, int k, int n, int ldc, float* scales, float* bias, const Option& opt)
{
    int8_t* a_ptr = (int8_t*)sa;
    int8_t* b_ptr = (int8_t*)sb;
    const int m_aligned = (m >> 2) << 2; // rows covered by the 4-row kernel
    const int tail = m - m_aligned;      // 0..3 remaining rows
    if (scales == 0)
    {
        // No requantization: emit raw int32 accumulators.
        int32_t* out = (int32_t*)dst;
#if PRINT_MATRIX
        int32_t* origin = out;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < m_aligned; i += 4)
        {
            int8kernel_m4((void*)(out + i * ldc), a_ptr + i * k, b_ptr, m, k, n, ldc, 0, 0);
        }
        a_ptr += m_aligned * k;
        out += m_aligned * ldc;
        if (tail == 3)
        {
            // two rows, then one more
            int8kernel_m2((void*)out, a_ptr, b_ptr, m, k, n, ldc, 0, 0);
            out += 2 * ldc;
            a_ptr += 2 * k;
            int8kernel_m1((void*)out, a_ptr, b_ptr, m, k, n, ldc, 0, 0);
        }
        else if (tail == 2)
        {
            int8kernel_m2((void*)out, a_ptr, b_ptr, m, k, n, ldc, 0, 0);
        }
        else if (tail == 1)
        {
            int8kernel_m1((void*)out, a_ptr, b_ptr, m, k, n, ldc, 0, 0);
        }
#if PRINT_MATRIX
        print_int32_matrix("pc", origin, m, n, ldc);
#endif
    }
    else
    {
        // Requantized path: output is int8, scaled (and optionally biased) per row.
        int8_t* out = (int8_t*)dst;
#if PRINT_MATRIX
        print_fp32_vec("scales", scales, m);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < m_aligned; i += 4)
        {
            int8kernel_m4((void*)(out + i * ldc), a_ptr + i * k, b_ptr, m, k, n, ldc, scales + i, (bias == 0) ? 0 : bias + i);
        }
        a_ptr += m_aligned * k;
        out += m_aligned * ldc;
        scales += m_aligned;
        bias = (bias == 0) ? 0 : bias + m_aligned;
        if (tail == 3)
        {
            int8kernel_m2((void*)out, a_ptr, b_ptr, m, k, n, ldc, scales, bias);
            out += 2 * ldc;
            a_ptr += 2 * k;
            scales += 2;
            bias = (bias == 0) ? 0 : bias + 2;
            int8kernel_m1((void*)out, a_ptr, b_ptr, m, k, n, ldc, scales, bias);
        }
        else if (tail == 2)
        {
            int8kernel_m2((void*)out, a_ptr, b_ptr, m, k, n, ldc, scales, bias);
        }
        else if (tail == 1)
        {
            int8kernel_m1((void*)out, a_ptr, b_ptr, m, k, n, ldc, scales, bias);
        }
    }
}
#ifdef PRINT_MATRIX
#undef PRINT_MATRIX
#endif
#endif
|
GB_unaryop__lnot_fp32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int8
// op(A') function: GB_tran__lnot_fp32_int8
// C type: float
// A type: int8_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the LNOT operator elementwise over a dense array of values:
// Cx [p] = !((float) Ax [p] != 0), i.e. Cx [p] is 1.0 when Ax [p] is zero
// and 0.0 otherwise.  The loop body is generated by the GB_CAST_OP macro
// defined above (cast int8_t -> float, then apply the unary op).
GrB_Info GB_unop__lnot_fp32_int8
(
    float *restrict Cx,             // output array, anz entries
    const int8_t *restrict Ax,      // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    // each entry is independent, so a static schedule partitions them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Compute C = lnot (cast (A')): transpose A, typecast int8_t to float, and
// apply the LNOT operator.  The actual transpose loop is the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros defined above.
GrB_Info GB_tran__lnot_fp32_int8
(
    GrB_Matrix C,                       // output matrix (already allocated)
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t **Rowcounts,                // per-slice row counts from phase 1
    GBI_single_iterator Iter,           // iterator over the vectors of A
    const int64_t *restrict A_slice,    // partition of A across naslice slices
    int naslice                         // number of slices (parallel tasks)
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of 2: Rowcounts already computed; this pass fills in C
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_atomic.c | /***** This test checks the OpenMP atomic directive with the operations
 ****** ++, --, +, -, *, /, &, |, ^, << and >>.
 ****** In particular, both integer and double-precision operands are
 ****** checked for +, - and /. Revised by Zhenying Liu of the University of Houston.
 *****/
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/* Verify the OpenMP "atomic" construct for the update forms ++, --, +=, -=,
   *=, /=, &=, |=, ^=, <<= and >>=, using int operands, and double operands
   for +=, -= and /=.  Each sub-test runs a parallel loop of atomic updates
   and compares against the serially computed result.  Diagnostics go to
   logFile.  Returns 1 when every sub-test passed, 0 otherwise.

   Fix: after a failed ++ test, x is now reset to LOOPCOUNT so the following
   -- test starts from a known value (the << test already recovers this way
   with x = 1024 before the >> test). */
int
check_omp_atomic (FILE * logFile)
{
  int sum = 0;
  int known_sum;
  double dsum = 0;
  double dknown_sum;
  double dt = 0.5;		/* base of geometric row for + and - test */
  double rounding_error = 1.E-9;
#define DOUBLE_DIGITS 20	/* dt^DOUBLE_DIGITS */
  int diff;
  double ddiff;
  int product = 1;
  int known_product;
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800	/* 10! */
  /* int logic_and = 1;
     int logic_or = 0; */
  int bit_and = 1;
  int bit_or = 0;
  int exclusiv_bit_or = 0;
  int logics[LOOPCOUNT];
  int i;
  double dpt, div;
  int x;
  int result = 0;		/* number of failed sub-tests */
  dt = 1. / 3.;
  /* integer sum: atomic += */
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++)
      {
#pragma omp atomic
	sum += i;
      }
  }
  if (known_sum != sum)
    {
      result++;
      fprintf (logFile,
	       "Error in sum with integers: Result was %d instead of %d.\n",
	       sum, known_sum);
    }
  /* integer difference: atomic -= */
  diff = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= LOOPCOUNT; ++i)
      {
#pragma omp atomic
	diff -= i;
      }
  }
  if (diff != 0)
    {
      result++;
      fprintf (logFile,
	       "Error in difference with integers: Result was %d instead of 0.\n",
	       diff);
    }
  /* Tests for doubles: sum of the geometric series dt^1 .. dt^DOUBLE_DIGITS */
  dsum = 0;
  dpt = 1;
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dpt *= dt;
    }
  dknown_sum = (1 - dpt) / (1 - dt);
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < DOUBLE_DIGITS; ++i)
      {
#pragma omp atomic
	dsum += pow (dt, i);
      }
  }
  if (dsum != dknown_sum && (fabs (dsum - dknown_sum) > rounding_error))
    {
      result++;
      fprintf (logFile,
	       "\nError in sum with doubles: Result was %f instead of: %f (Difference: %E)\n",
	       dsum, dknown_sum, dsum - dknown_sum);
    }
  /* double difference: start from the series sum and atomically subtract
     every term again; the remainder should be ~0 */
  dpt = 1;
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dpt *= dt;
    }
  ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < DOUBLE_DIGITS; ++i)
      {
#pragma omp atomic
	ddiff -= pow (dt, i);
      }
  }
  if (fabs (ddiff) > rounding_error)
    {
      result++;
      fprintf (logFile,
	       "Error in Difference with doubles: Result was %E instead of 0.0\n",
	       ddiff);
    }
  /* integer product: atomic *= should yield MAX_FACTOR! */
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++)
      {
#pragma omp atomic
	product *= i;
      }
  }
  known_product = KNOWN_PRODUCT;
  if (known_product != product)
    {
      result++;
      fprintf (logFile,
	       "Error in Product with integers: Result was %d instead of %d\n",
	       product, known_product);
    }
  /* integer division: atomic /= of MAX_FACTOR! by 1..MAX_FACTOR gives 1 */
  product = KNOWN_PRODUCT;
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++)
      {
#pragma omp atomic
	product /= i;
      }
  }
  if (product != 1)
    {
      result++;
      fprintf (logFile,
	       "Error in division with integers: Result was %d instead of 1\n",
	       product );
    }
  /* double division: 5.0E+5 / MAX_FACTOR! ~= 0.137787 */
  div = 5.0E+5;
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++)
      {
#pragma omp atomic
	div /= i;
      }
  }
  if ( fabs(div-0.137787) >= 1.0E-4 )
    {
      result++;
      fprintf (logFile,
	       "Error in division with double: Result was %f instead of 0.137787\n", div);
    }
  /* atomic ++ */
  x = 0;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
#pragma omp atomic
	x++;
      }
  }
  if (x != LOOPCOUNT)
    {
      result++;
      fprintf (logFile, "Error in ++\n");
      /* recover so the following -- test starts from a known value,
         mirroring the x = 1024 recovery done in the << test below */
      x = LOOPCOUNT;
    }
  /* atomic -- : undoes the ++ loop, x must return to 0 */
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
#pragma omp atomic
	x--;
      }
  }
  if ( x != 0)
    {
      result++;
      fprintf (logFile, "Error in --\n");
    }
  /* atomic &= part 1: AND of all ones stays 1 */
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logics[i] = 1;
    }
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
#pragma omp atomic
	bit_and &= logics[i];
      }
  }
  if (!bit_and)
    {
      result++;
      fprintf (logFile, "Error in BIT AND part 1\n");
    }
  /* atomic &= part 2: a single zero forces the AND to 0 */
  bit_and = 1;
  logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
#pragma omp atomic
	bit_and &= logics[i];
      }
  }
  if (bit_and)
    {
      result++;
      fprintf (logFile, "Error in BIT AND part 2\n");
    }
  /* atomic |= part 1: OR of all zeros stays 0 */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
#pragma omp atomic
	bit_or |= logics[i];
      }
  }
  if (bit_or)
    {
      result++;
      fprintf (logFile, "Error in BIT OR part 1\n");
    }
  /* atomic |= part 2: a single one forces the OR to 1 */
  bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
#pragma omp atomic
	bit_or |= logics[i];
      }
  }
  if (!bit_or)
    {
      result++;
      fprintf (logFile, "Error in BIT OR part 2\n");
    }
  /* atomic ^= part 1: XOR of all zeros stays 0 */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
#pragma omp atomic
	exclusiv_bit_or ^= logics[i];
      }
  }
  if (exclusiv_bit_or)
    {
      result++;
      fprintf (logFile, "Error in EXCLUSIV BIT OR part 1\n");
    }
  /* atomic ^= part 2: a single one flips the XOR to 1 */
  exclusiv_bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
#pragma omp atomic
	exclusiv_bit_or ^= logics[i];
      }
  }
  if (!exclusiv_bit_or)
    {
      result++;
      fprintf (logFile, "Error in EXCLUSIV BIT OR part 2\n");
    }
  /* atomic <<= : ten doublings of 1 give 1024 */
  x = 1;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < 10; ++i)
      {
#pragma omp atomic
	x <<= 1;
      }
  }
  if ( x != 1024)
    {
      result++;
      fprintf (logFile, "Error in <<\n");
      x = 1024;		/* recover so the >> test starts from a known value */
    }
  /* atomic >>= : ten halvings of 1024 give 1 */
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < 10; ++i)
      {
#pragma omp atomic
	x >>= 1;
      }
  }
  if ( x != 1 )
    {
      result++;
      fprintf (logFile, "Error in >>\n");
    }
  /*fprintf("\nResult:%d\n",result); */
  return (result == 0);
}
/* Cross-check companion to check_omp_atomic: the same sub-tests but with
   every "#pragma omp atomic" deliberately REMOVED.  The unsynchronized
   updates are expected to race, so this function should usually report
   failures; the test suite uses it to confirm that check_omp_atomic is
   actually sensitive to missing atomicity.  Do not "fix" the races here.
   Returns 1 when all sub-tests passed (i.e. no race was observed), 0
   otherwise. */
int
crosscheck_omp_atomic (FILE * logFile)
{
  int sum = 0;
  int known_sum;
  double dsum = 0;
  double dknown_sum;
  double dt = 0.5;		/* base of geometric row for + and - test */
  double rounding_error = 1.E-9;
#define DOUBLE_DIGITS 20	/* dt^DOUBLE_DIGITS */
  int diff;
  double ddiff;
  int product = 1;
  int known_product;
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800	/* 10! */
  /* int logic_and = 1;
     int logic_or = 0; */
  int bit_and = 1;
  int bit_or = 0;
  int exclusiv_bit_or = 0;
  int logics[LOOPCOUNT];
  int i;
  double dpt, div;
  int x;
  int result = 0;		/* number of failed sub-tests */
  dt = 1. / 3.;
  /* integer sum, unprotected += (races expected) */
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= LOOPCOUNT; i++)
      {
	sum += i;
      }
  }
  if (known_sum != sum)
    {
      result++;
      fprintf (logFile,
	       "Error in sum with integers: Result was %d instead of %d.\n",
	       sum, known_sum);
    }
  /* integer difference, unprotected -= */
  diff = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= LOOPCOUNT; ++i)
      {
	diff -= i;
      }
  }
  if (diff != 0)
    {
      result++;
      fprintf (logFile,
	       "Error in difference with integers: Result was %d instead of 0.\n",
	       diff);
    }
  /* Tests for doubles: unprotected += of the geometric series */
  dsum = 0;
  dpt = 1;
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dpt *= dt;
    }
  dknown_sum = (1 - dpt) / (1 - dt);
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < DOUBLE_DIGITS; ++i)
      {
	dsum += pow (dt, i);
      }
  }
  if (dsum != dknown_sum && (fabs (dsum - dknown_sum) > rounding_error))
    {
      result++;
      fprintf (logFile,
	       "\nError in sum with doubles: Result was %f instead of: %f (Difference: %E)\n",
	       dsum, dknown_sum, dsum - dknown_sum);
    }
  /* double difference, unprotected -= */
  dpt = 1;
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dpt *= dt;
    }
  ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < DOUBLE_DIGITS; ++i)
      {
	ddiff -= pow (dt, i);
      }
  }
  if (fabs (ddiff) > rounding_error)
    {
      result++;
      fprintf (logFile,
	       "Error in Difference with doubles: Result was %E instead of 0.0\n",
	       ddiff);
    }
  /* integer product, unprotected *= */
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++)
      {
	product *= i;
      }
  }
  known_product = KNOWN_PRODUCT;
  if (known_product != product)
    {
      result++;
      fprintf (logFile,
	       "Error in Product with integers: Result was %d instead of %d\n",
	       product, known_product);
    }
  /* integer division, unprotected /= */
  product = KNOWN_PRODUCT;
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++)
      {
	product /= i;
      }
  }
  if (product != 1)
    {
      result++;
      /* fprintf (logFile,
	 "Error in division with integers: Result was %d instead of 1\n",
	 product );
       */
    }
  /* double division, unprotected /= */
  div = 5.0E+5;
#pragma omp parallel
  {
#pragma omp for
    for (i = 1; i <= MAX_FACTOR; i++)
      {
	div /= i;
      }
  }
  if ( fabs(div-0.137787) >= 1.0E-4 )
    {
      result++;
      /* fprintf (logFile,
	 "Error in division with double: Result was %f instead of 0.137787\n", div);
       */
    }
  /* unprotected ++ */
  x = 0;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
	x++;
      }
  }
  if (x != LOOPCOUNT)
    {
      result++;
      fprintf (logFile, "Error in ++\n");
    }
  /* unprotected -- */
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
	x--;
      }
  }
  if ( x != 0)
    {
      result++;
      fprintf (logFile, "Error in --\n");
    }
  /* unprotected &= part 1: AND of all ones */
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logics[i] = 1;
    }
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
	bit_and &= logics[i];
      }
  }
  if (!bit_and)
    {
      result++;
      fprintf (logFile, "Error in BIT AND part 1\n");
    }
  /* unprotected &= part 2: one zero in the array */
  bit_and = 1;
  logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
	bit_and &= logics[i];
      }
  }
  if (bit_and)
    {
      result++;
      fprintf (logFile, "Error in BIT AND part 2\n");
    }
  /* unprotected |= part 1: OR of all zeros */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
	bit_or |= logics[i];
      }
  }
  if (bit_or)
    {
      result++;
      fprintf (logFile, "Error in BIT OR part 1\n");
    }
  /* unprotected |= part 2: one set bit in the array */
  bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
	bit_or |= logics[i];
      }
  }
  if (!bit_or)
    {
      result++;
      fprintf (logFile, "Error in BIT OR part 2\n");
    }
  /* unprotected ^= part 1: XOR of all zeros */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
	exclusiv_bit_or ^= logics[i];
      }
  }
  if (exclusiv_bit_or)
    {
      result++;
      fprintf (logFile, "Error in EXCLUSIV BIT OR part 1\n");
    }
  /* unprotected ^= part 2: one set bit in the array */
  exclusiv_bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; ++i)
      {
	exclusiv_bit_or ^= logics[i];
      }
  }
  if (!exclusiv_bit_or)
    {
      result++;
      fprintf (logFile, "Error in EXCLUSIV BIT OR part 2\n");
    }
  /* unprotected <<= */
  x = 1;
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < 10; ++i)
      {
	x <<= 1;
      }
  }
  if ( x != 1024)
    {
      result++;
      fprintf (logFile, "Error in <<\n");
      x = 1024;		/* recover so the >> test starts from a known value */
    }
  /* unprotected >>= */
#pragma omp parallel
  {
#pragma omp for
    for (i = 0; i < 10; ++i)
      {
	x >>= 1;
      }
  }
  if ( x != 1 )
    {
      result++;
      fprintf (logFile, "Error in >>\n");
    }
  /*fprintf("\nResult:%d\n",result); */
  return (result == 0);
}
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  /// \brief Kind of the directive.
  OpenMPDirectiveKind Kind;
  /// \brief Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the directive.
  SourceLocation EndLoc;
  /// \brief Numbers of clauses.
  const unsigned NumClauses;
  /// \brief Number of child expressions/stmts.
  const unsigned NumChildren;
  /// \brief Offset from this to the start of clauses.
  /// There are NumClauses pointers to clauses, they are followed by
  /// NumChildren pointers to child stmts/exprs (if the directive type
  /// requires an associated stmt, then it has to be the first of them).
  const unsigned ClausesOffset;
  /// \brief Get the clauses storage.
  /// The clause pointers live in trailing storage allocated together with the
  /// directive object; ClausesOffset is the byte distance from \c this to the
  /// first of them.
  MutableArrayRef<OMPClause *> getClauses() {
    OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
        reinterpret_cast<char *>(this) + ClausesOffset);
    return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
  }
protected:
  /// \brief Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  /// \param NumChildren Number of child expressions/stmts.
  ///
  template <typename T>
  OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses, unsigned NumChildren)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
        NumChildren(NumChildren),
        // Clauses are laid out immediately after the most-derived object
        // (of type T), aligned for OMPClause pointers.
        ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {}
  /// \brief Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);
  /// \brief Set the associated statement for the directive.
  ///
  /// \param S Associated statement.
  ///
  void setAssociatedStmt(Stmt *S) {
    assert(hasAssociatedStmt() && "no associated statement.");
    *child_begin() = S;
  }
public:
  /// \brief Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;
    /// Advance the wrapped iterator to the next clause of the requested type
    /// (or to End if none remains).
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }
  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }
    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }
    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };
  /// Returns a range filtering \p Clauses down to the clauses of type
  /// SpecificClause.
  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }
  /// Returns a range over this directive's clauses of type SpecificClause.
  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }
  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    if (Clauses.begin() != Clauses.end()) {
      assert(std::next(Clauses.begin()) == Clauses.end() &&
             "There are at least 2 clauses of the specified kind");
      return *Clauses.begin();
    }
    return nullptr;
  }
  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }
  /// \brief Returns starting location of directive kind.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns ending location of directive.
  SourceLocation getLocEnd() const { return EndLoc; }
  /// \brief Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
  /// \brief Get number of clauses.
  unsigned getNumClauses() const { return NumClauses; }
  /// \brief Returns specified clause.
  ///
  /// \param i Number of clause.
  ///
  OMPClause *getClause(unsigned i) const { return clauses()[i]; }
  /// \brief Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return NumChildren > 0; }
  /// \brief Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    assert(hasAssociatedStmt() && "no associated statement.");
    return *child_begin();
  }
  Stmt *getAssociatedStmt() {
    assert(hasAssociatedStmt() && "no associated statement.");
    return *child_begin();
  }
  /// \brief Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    assert(std::any_of(
               CaptureRegions.begin(), CaptureRegions.end(),
               [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
           "RegionKind not found in OpenMP CaptureRegions.");
    // The CapturedStmts for the capture regions are nested one inside the
    // other, in the order listed in CaptureRegions; peel them off until the
    // requested region is reached.
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (auto ThisCaptureRegion : CaptureRegions) {
      if (ThisCaptureRegion == RegionKind)
        return CS;
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    }
    llvm_unreachable("Incorrect RegionKind specified for directive.");
  }
  /// Get innermost captured statement for the construct.
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() && getAssociatedStmt() &&
           "Must have associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    assert(!CaptureRegions.empty() &&
           "At least one captured statement must be provided.");
    // Unwrap one level of capturing per region, keeping the last (innermost).
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    return CS;
  }
  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }
  /// \brief Returns the kind of this OpenMP directive.
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
  /// Support for LLVM-style RTTI: true for any OpenMP executable directive.
  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }
  /// Children are the trailing stmt/expr pointers stored immediately after
  /// the clause pointers; empty when there is no associated statement.
  child_range children() {
    if (!hasAssociatedStmt())
      return child_range(child_iterator(), child_iterator());
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
    return child_range(ChildStorage, ChildStorage + NumChildren);
  }
  ArrayRef<OMPClause *> clauses() { return getClauses(); }
  ArrayRef<OMPClause *> clauses() const {
    return const_cast<OMPExecutableDirective *>(this)->getClauses();
  }
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief true if the construct has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        HasCancel(false) {}
  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
/// The first 9 children are necessary for all the loop directives,
/// the next 8 are specific to the worksharing ones, and the next 11 are
/// used for combined constructs containing two pragmas associated to loops.
/// After the fixed children, three arrays of length CollapsedNum are
/// allocated: loop counters, their updates and final values.
/// PrevLowerBound and PrevUpperBound are used to communicate blocking
/// information in composite constructs which require loop blocking
/// DistInc is used to generate the increment expression for the distribute
/// loop when combined with a further nested loop
/// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
/// for loop when combined with a previous distribute loop in the same pragma
/// (e.g. 'distribute parallel for')
///
enum {
AssociatedStmtOffset = 0,
IterationVariableOffset = 1,
LastIterationOffset = 2,
CalcLastIterationOffset = 3,
PreConditionOffset = 4,
CondOffset = 5,
InitOffset = 6,
IncOffset = 7,
PreInitsOffset = 8,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals arrays).
DefaultEnd = 9,
// The following 8 exprs are used by worksharing and distribute loops only.
IsLastIterVariableOffset = 9,
LowerBoundVariableOffset = 10,
UpperBoundVariableOffset = 11,
StrideVariableOffset = 12,
EnsureUpperBoundOffset = 13,
NextLowerBoundOffset = 14,
NextUpperBoundOffset = 15,
NumIterationsOffset = 16,
// Offset to the end for worksharing loop directives.
WorksharingEnd = 17,
PrevLowerBoundVariableOffset = 17,
PrevUpperBoundVariableOffset = 18,
DistIncOffset = 19,
PrevEnsureUpperBoundOffset = 20,
CombinedLowerBoundVariableOffset = 21,
CombinedUpperBoundVariableOffset = 22,
CombinedEnsureUpperBoundOffset = 23,
CombinedInitOffset = 24,
CombinedConditionOffset = 25,
CombinedNextLowerBoundOffset = 26,
CombinedNextUpperBoundOffset = 27,
// Offset to the end (and start of the following counters/updates/finals
// arrays) for combined distribute loop directives.
CombinedDistributeEnd = 28,
};
/// \brief Get the counters storage.
/// The per-loop expression arrays live in trailing child storage right after
/// the fixed children; getArraysOffset() gives the index of the first array.
MutableArrayRef<Expr *> getCounters() {
  Expr **Storage = reinterpret_cast<Expr **>(
      &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
  return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the private counters storage.
/// Second of the five CollapsedNum-sized trailing arrays.
MutableArrayRef<Expr *> getPrivateCounters() {
  Expr **Storage = reinterpret_cast<Expr **>(&*std::next(
      child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum));
  return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the inits storage.
/// (The previous comment said "updates" — this accessor returns the loop
/// counters' init expressions, the third of the trailing arrays.)
MutableArrayRef<Expr *> getInits() {
  Expr **Storage = reinterpret_cast<Expr **>(
      &*std::next(child_begin(),
                  getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
  return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
/// Fourth of the five CollapsedNum-sized trailing arrays.
MutableArrayRef<Expr *> getUpdates() {
  Expr **Storage = reinterpret_cast<Expr **>(
      &*std::next(child_begin(),
                  getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
  return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the final counter updates storage.
/// Last of the five CollapsedNum-sized trailing arrays.
MutableArrayRef<Expr *> getFinals() {
  Expr **Storage = reinterpret_cast<Expr **>(
      &*std::next(child_begin(),
                  getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum));
  return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
protected:
/// \brief Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
/// \param NumClauses Number of clauses.
/// \param NumSpecialChildren Number of additional directive-specific stmts.
///
template <typename T>
OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses,
unsigned NumSpecialChildren = 0)
: OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
numLoopChildren(CollapsedNum, Kind) +
NumSpecialChildren),
CollapsedNum(CollapsedNum) {}
/// \brief Offset to the start of children expression arrays.
/// Loop-bound-sharing (combined distribute) directives carry the largest
/// fixed-children set, worksharing/taskloop/distribute an intermediate one,
/// and all remaining loop directives only the default set.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
  if (isOpenMPLoopBoundSharingDirective(Kind))
    return CombinedDistributeEnd;
  if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
      isOpenMPDistributeDirective(Kind))
    return WorksharingEnd;
  return DefaultEnd;
}
/// \brief Children number.
///
/// \param CollapsedNum Number of collapsed loops ('collapse' clause value).
/// \param Kind Directive kind, which determines the fixed-children count.
static unsigned numLoopChildren(unsigned CollapsedNum,
                                OpenMPDirectiveKind Kind) {
  return getArraysOffset(Kind) + 5 * CollapsedNum; // Counters,
                                                   // PrivateCounters, Inits,
                                                   // Updates and Finals
}
void setIterationVariable(Expr *IV) {
*std::next(child_begin(), IterationVariableOffset) = IV;
}
void setLastIteration(Expr *LI) {
*std::next(child_begin(), LastIterationOffset) = LI;
}
void setCalcLastIteration(Expr *CLI) {
*std::next(child_begin(), CalcLastIterationOffset) = CLI;
}
void setPreCond(Expr *PC) {
*std::next(child_begin(), PreConditionOffset) = PC;
}
void setCond(Expr *Cond) {
*std::next(child_begin(), CondOffset) = Cond;
}
void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
void setPreInits(Stmt *PreInits) {
*std::next(child_begin(), PreInitsOffset) = PreInits;
}
void setIsLastIterVariable(Expr *IL) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), IsLastIterVariableOffset) = IL;
}
void setLowerBoundVariable(Expr *LB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), LowerBoundVariableOffset) = LB;
}
void setUpperBoundVariable(Expr *UB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), UpperBoundVariableOffset) = UB;
}
void setStrideVariable(Expr *ST) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), StrideVariableOffset) = ST;
}
void setEnsureUpperBound(Expr *EUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
}
void setNextLowerBound(Expr *NLB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextLowerBoundOffset) = NLB;
}
void setNextUpperBound(Expr *NUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextUpperBoundOffset) = NUB;
}
void setNumIterations(Expr *NI) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), NumIterationsOffset) = NI;
}
void setPrevLowerBoundVariable(Expr *PrevLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
}
void setPrevUpperBoundVariable(Expr *PrevUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
}
void setDistInc(Expr *DistInc) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), DistIncOffset) = DistInc;
}
void setPrevEnsureUpperBound(Expr *PrevEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
}
void setCombinedLowerBoundVariable(Expr *CombLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB;
}
void setCombinedUpperBoundVariable(Expr *CombUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB;
}
void setCombinedEnsureUpperBound(Expr *CombEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB;
}
void setCombinedInit(Expr *CombInit) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedInitOffset) = CombInit;
}
void setCombinedCond(Expr *CombCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedConditionOffset) = CombCond;
}
void setCombinedNextLowerBound(Expr *CombNLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB;
}
void setCombinedNextUpperBound(Expr *CombNUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB;
}
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
public:
/// The expressions built to support OpenMP loops in combined/composite
/// pragmas (e.g. pragma omp distribute parallel for)
struct DistCombinedHelperExprs {
  /// DistributeLowerBound - used when composing 'omp distribute' with
  /// 'omp for' in a same construct.
  Expr *LB;
  /// DistributeUpperBound - used when composing 'omp distribute' with
  /// 'omp for' in a same construct.
  Expr *UB;
  /// DistributeEnsureUpperBound - used when composing 'omp distribute'
  /// with 'omp for' in a same construct, EUB depends on DistUB
  Expr *EUB;
  /// Distribute loop iteration variable init used when composing 'omp
  /// distribute'
  /// with 'omp for' in a same construct
  Expr *Init;
  /// Distribute Loop condition used when composing 'omp distribute'
  /// with 'omp for' in a same construct
  Expr *Cond;
  /// Update of LowerBound for statically scheduled omp loops for
  /// outer loop in combined constructs (e.g. 'distribute parallel for')
  Expr *NLB;
  /// Update of UpperBound for statically scheduled omp loops for
  /// outer loop in combined constructs (e.g. 'distribute parallel for')
  Expr *NUB;
};
/// \brief The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
  /// \brief Loop iteration variable.
  Expr *IterationVarRef;
  /// \brief Loop last iteration number.
  Expr *LastIteration;
  /// \brief Loop number of iterations.
  Expr *NumIterations;
  /// \brief Calculation of last iteration.
  Expr *CalcLastIteration;
  /// \brief Loop pre-condition.
  Expr *PreCond;
  /// \brief Loop condition.
  Expr *Cond;
  /// \brief Loop iteration variable init.
  Expr *Init;
  /// \brief Loop increment.
  Expr *Inc;
  /// \brief IsLastIteration - local flag variable passed to runtime.
  Expr *IL;
  /// \brief LowerBound - local variable passed to runtime.
  Expr *LB;
  /// \brief UpperBound - local variable passed to runtime.
  Expr *UB;
  /// \brief Stride - local variable passed to runtime.
  Expr *ST;
  /// \brief EnsureUpperBound -- expression UB = min(UB, NumIterations).
  Expr *EUB;
  /// \brief Update of LowerBound for statically scheduled 'omp for' loops.
  Expr *NLB;
  /// \brief Update of UpperBound for statically scheduled 'omp for' loops.
  Expr *NUB;
  /// \brief PreviousLowerBound - local variable passed to runtime in the
  /// enclosing schedule or null if that does not apply.
  Expr *PrevLB;
  /// \brief PreviousUpperBound - local variable passed to runtime in the
  /// enclosing schedule or null if that does not apply.
  Expr *PrevUB;
  /// \brief DistInc - increment expression for distribute loop when found
  /// combined with a further loop level (e.g. in 'distribute parallel for')
  /// expression IV = IV + ST
  Expr *DistInc;
  /// \brief PrevEUB - expression similar to EUB but to be used when loop
  /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
  /// when ensuring that the UB is either the calculated UB by the runtime or
  /// the end of the assigned distribute chunk)
  /// expression UB = min (UB, PrevUB)
  Expr *PrevEUB;
  /// \brief Counters Loop counters.
  SmallVector<Expr *, 4> Counters;
  /// \brief PrivateCounters Loop counters.
  SmallVector<Expr *, 4> PrivateCounters;
  /// \brief Expressions for loop counters inits for CodeGen.
  SmallVector<Expr *, 4> Inits;
  /// \brief Expressions for loop counters update for CodeGen.
  SmallVector<Expr *, 4> Updates;
  /// \brief Final loop counter values for CodeGen.
  SmallVector<Expr *, 4> Finals;
  /// Init statement for all captured expressions.
  Stmt *PreInits;
  /// Expressions used when combining OpenMP loop pragmas
  DistCombinedHelperExprs DistCombinedFields;
  /// \brief Check if all the expressions are built (does not check the
  /// worksharing ones).
  bool builtAll() {
    return IterationVarRef != nullptr && LastIteration != nullptr &&
           NumIterations != nullptr && PreCond != nullptr &&
           Cond != nullptr && Init != nullptr && Inc != nullptr;
  }
  /// \brief Initialize all the fields to null.
  /// \param Size Number of elements in the counters/finals/updates arrays.
  void clear(unsigned Size) {
    IterationVarRef = nullptr;
    LastIteration = nullptr;
    CalcLastIteration = nullptr;
    PreCond = nullptr;
    Cond = nullptr;
    Init = nullptr;
    Inc = nullptr;
    IL = nullptr;
    LB = nullptr;
    UB = nullptr;
    ST = nullptr;
    EUB = nullptr;
    NLB = nullptr;
    NUB = nullptr;
    NumIterations = nullptr;
    PrevLB = nullptr;
    PrevUB = nullptr;
    DistInc = nullptr;
    PrevEUB = nullptr;
    Counters.resize(Size);
    PrivateCounters.resize(Size);
    Inits.resize(Size);
    Updates.resize(Size);
    Finals.resize(Size);
    for (unsigned i = 0; i < Size; ++i) {
      Counters[i] = nullptr;
      PrivateCounters[i] = nullptr;
      Inits[i] = nullptr;
      Updates[i] = nullptr;
      Finals[i] = nullptr;
    }
    PreInits = nullptr;
    DistCombinedFields.LB = nullptr;
    DistCombinedFields.UB = nullptr;
    DistCombinedFields.EUB = nullptr;
    DistCombinedFields.Init = nullptr;
    DistCombinedFields.Cond = nullptr;
    DistCombinedFields.NLB = nullptr;
    DistCombinedFields.NUB = nullptr;
  }
};
/// \brief Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; }
Expr *getIterationVariable() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), IterationVariableOffset)));
}
Expr *getLastIteration() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), LastIterationOffset)));
}
Expr *getCalcLastIteration() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CalcLastIterationOffset)));
}
Expr *getPreCond() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PreConditionOffset)));
}
Expr *getCond() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
}
Expr *getInit() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
}
Expr *getInc() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
}
const Stmt *getPreInits() const {
return *std::next(child_begin(), PreInitsOffset);
}
Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); }
Expr *getIsLastIterVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), IsLastIterVariableOffset)));
}
Expr *getLowerBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), LowerBoundVariableOffset)));
}
Expr *getUpperBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), UpperBoundVariableOffset)));
}
Expr *getStrideVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), StrideVariableOffset)));
}
Expr *getEnsureUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), EnsureUpperBoundOffset)));
}
Expr *getNextLowerBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), NextLowerBoundOffset)));
}
Expr *getNextUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), NextUpperBoundOffset)));
}
Expr *getNumIterations() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), NumIterationsOffset)));
}
Expr *getPrevLowerBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PrevLowerBoundVariableOffset)));
}
Expr *getPrevUpperBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PrevUpperBoundVariableOffset)));
}
Expr *getDistInc() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), DistIncOffset)));
}
Expr *getPrevEnsureUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PrevEnsureUpperBoundOffset)));
}
Expr *getCombinedLowerBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CombinedLowerBoundVariableOffset)));
}
Expr *getCombinedUpperBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CombinedUpperBoundVariableOffset)));
}
Expr *getCombinedEnsureUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CombinedEnsureUpperBoundOffset)));
}
Expr *getCombinedInit() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CombinedInitOffset)));
}
Expr *getCombinedCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CombinedConditionOffset)));
}
Expr *getCombinedNextLowerBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CombinedNextLowerBoundOffset)));
}
Expr *getCombinedNextUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CombinedNextUpperBoundOffset)));
}
const Stmt *getBody() const {
// This relies on the loop form is already checked by Sema.
const Stmt *Body =
getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
Body = cast<ForStmt>(Body)->getBody();
for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
Body = Body->IgnoreContainers();
Body = cast<ForStmt>(Body)->getBody();
}
return Body;
}
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
return const_cast<OMPLoopDirective *>(this)->getCounters();
}
ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
ArrayRef<Expr *> private_counters() const {
return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
}
ArrayRef<Expr *> inits() { return getInits(); }
ArrayRef<Expr *> inits() const {
return const_cast<OMPLoopDirective *>(this)->getInits();
}
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
return const_cast<OMPLoopDirective *>(this)->getFinals();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass ||
T->getStmtClass() == OMPForDirectiveClass ||
T->getStmtClass() == OMPForSimdDirectiveClass ||
T->getStmtClass() == OMPParallelForDirectiveClass ||
T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTaskLoopDirectiveClass ||
T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeDirectiveClass ||
T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTargetSimdDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
T->getStmtClass() ==
OMPTeamsDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForDirectiveClass ||
T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Set cancel state. Private; only the ASTStmtReader friend and
  /// Create() can establish it.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt, const HelperExprs &Exprs,
                                 bool HasCancel);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        HasCancel(false) {}
  /// \brief Set cancel state. Private; only the ASTStmtReader friend and
  /// Create() can establish it.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1),
        HasCancel(false) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// \brief Set cancel state.
  /// NOTE(review): public here, unlike sibling directives where it is
  /// private — confirm whether callers outside ASTStmtReader rely on this.
  void setHasCancel(bool Has) { HasCancel = Has; }
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               StartLoc, EndLoc, 0, 1) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               SourceLocation(), SourceLocation(), 0, 1) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Name of the directive.
  DeclarationNameInfo DirName;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc, unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               StartLoc, EndLoc, NumClauses, 1),
        DirName(Name) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPCriticalDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        DirName() {}
  /// \brief Set name of the directive. Private; used during deserialization.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// \brief Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief true if current region has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses),
        HasCancel(false) {}
  /// \brief Set cancel state. Private; only the ASTStmtReader friend and
  /// Create() can establish it.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                               unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, StartLoc, EndLoc,
                               NumClauses, 1),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, SourceLocation(),
                               SourceLocation(), NumClauses, 1),
        HasCancel(false) {}
  /// \brief Set cancel state. Private; only the ASTStmtReader friend and
  /// Create() can establish it.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief true if this directive has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
                               EndLoc, NumClauses, 1),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        HasCancel(false) {}
  /// \brief Set cancel state. Private; only the ASTStmtReader friend and
  /// Create() can establish it.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true, if current directive has inner cancel directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, bool HasCancel);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               StartLoc, EndLoc, 0, 0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               SourceLocation(), SourceLocation(), 0, 0) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               StartLoc, EndLoc, 0, 0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               SourceLocation(), SourceLocation(), 0, 0) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               StartLoc, EndLoc, 0, 0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               SourceLocation(), SourceLocation(), 0, 0) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// \brief Support for isa/dyn_cast.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
/// Child layout (NumChildren = 2): child 0 is presumably the associated
/// statement (the accessor lives in the base class, not visible here);
/// child 1 holds the task_reduction return variable, see
/// setReductionRef()/getReductionRef().
class OMPTaskgroupDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                        unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
                               StartLoc, EndLoc, NumClauses, 2) {}
  /// Build an empty directive.  Used during AST deserialization.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskgroupDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
                               SourceLocation(), SourceLocation(), NumClauses,
                               2) {}
  /// Sets the task_reduction return variable (stored in child slot 1).
  void setReductionRef(Expr *RR) {
    *std::next(child_begin(), 1) = RR;
  }
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param ReductionRef Reference to the task_reduction return variable.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *ReductionRef);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);
  /// Returns reference to the task_reduction return variable.
  const Expr *getReductionRef() const {
    return static_cast<const Expr *>(*std::next(child_begin(), 1));
  }
  Expr *getReductionRef() {
    return static_cast<Expr *>(*std::next(child_begin(), 1));
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments- variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.  No associated statement (NumChildren = 0 below).
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               StartLoc, EndLoc, NumClauses, 0) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPFlushDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
/// Has one child slot (NumChildren = 1), presumably the associated
/// statement; the accessor is provided by the base class (not visible here).
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPOrderedDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// \brief This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
/// Child layout (NumChildren = 5): slot 0 is presumably the associated
/// statement (base-class accessor, not visible here); slot 1 = 'x',
/// slot 2 = update expression, slot 3 = 'v', slot 4 = 'expr' — see the
/// setX/setUpdateExpr/setV/setExpr helpers below.
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;
  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and false
  /// otherwise.
  bool IsPostfixUpdate;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
  /// \brief Set 'x' part of the associated expression/statement (child slot 1).
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// \brief Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)' (child slot 2).
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
  /// \brief Set 'v' part of the associated expression/statement (child slot 3).
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
  /// \brief Set 'expr' part of the associated expression/statement (child
  /// slot 4).
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }
public:
  /// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// \brief Get 'x' part of the associated expression/statement.
  /// May return null if the slot was never set.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// \brief Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// \brief Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// \brief Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// \brief Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }
  /// \brief Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
/// One child slot (NumChildren = 1), presumably the associated statement.
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
/// One child slot (NumChildren = 1), presumably the associated statement.
class OMPTargetDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, StartLoc, EndLoc, NumClauses,
                               1) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetEnterDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetExitDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetExitDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief true if current region has inner cancel directive.
  /// Defaults to false in both constructors; updated via setHasCancel().
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
/// One child slot (NumChildren = 1), presumably the associated statement.
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
/// Stand-alone directive: no clauses and no children (0, 0 below).
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Kind of the region being cancelled (e.g. 'for' above);
  /// OMPD_unknown until setCancelRegion() is called.
  OpenMPDirectiveKind CancelRegion;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, StartLoc, EndLoc, 0, 0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, SourceLocation(),
                               SourceLocation(), 0, 0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);
  /// \brief Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
/// \brief This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
/// Takes clauses but has no associated statement (NumChildren = 0 below).
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Kind of the region being cancelled; OMPD_unknown until
  /// setCancelRegion() is called.
  OpenMPDirectiveKind CancelRegion;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses, 0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// \brief Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.  Used during AST deserialization.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, SourceLocation(), SourceLocation(),
                         CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, StartLoc, EndLoc, NumClauses,
                               // trailing 1: presumably the number of child
                               // statements (the associated statement) —
                               // confirm against OMPExecutableDirective.
                               1) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetUpdateDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetUpdateDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  // The AST deserializer needs access to the private members below.
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// HasCancel is initialized by its in-class initializer above; repeating
  /// it in the mem-initializer list would be redundant.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum,
                                        unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                 unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum,
                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass,
                         OMPD_target_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd,
                         // Fixed: missing space after the comma, which was
                         // inconsistent with every sibling class.
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         OMPD_teams_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum,
                                           unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         OMPD_teams_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_teams_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  // The AST deserializer needs access to the private members below.
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// HasCancel is initialized by its in-class initializer above; repeating
  /// it in the mem-initializer list would be redundant.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         OMPD_teams_distribute_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum,
                                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         OMPD_teams_distribute_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NOTE: unlike \a Create, this factory takes \p NumClauses *before*
  /// \p CollapsedNum.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  // The AST deserializer needs access to the private constructors below.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               OMPD_target_teams, StartLoc, EndLoc, NumClauses,
                               // trailing 1: presumably the number of child
                               // statements (the associated statement) —
                               // confirm against OMPExecutableDirective.
                               1) {}

  /// Build an empty directive (all locations invalid), used as a shell to be
  /// filled in by the AST reader.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               OMPD_target_teams, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  // The AST reader constructs empty nodes and fills the private state
  // directly during deserialization.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
                         OMPD_target_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
                         OMPD_target_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  // Support for isa<>/dyn_cast<> dispatch over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  // The AST reader constructs empty nodes and fills the private state
  // directly during deserialization (including HasCancel via setHasCancel).
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  // Note: the default member initializer above already sets HasCancel to
  // false, so neither constructor repeats it in the mem-initializer list.
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum,
                                               unsigned NumClauses)
      : OMPLoopDirective(this,
                         OMPTargetTeamsDistributeParallelForDirectiveClass,
                         OMPD_target_teams_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum,
                                                        unsigned NumClauses)
      : OMPLoopDirective(
            this, OMPTargetTeamsDistributeParallelForDirectiveClass,
            OMPD_target_teams_distribute_parallel_for, SourceLocation(),
            SourceLocation(), CollapsedNum, NumClauses) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  // Support for isa<>/dyn_cast<> dispatch over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  // The AST reader constructs empty nodes and fills the private state
  // directly during deserialization.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                                   SourceLocation EndLoc,
                                                   unsigned CollapsedNum,
                                                   unsigned NumClauses)
      : OMPLoopDirective(this,
                         OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
                         OMPD_target_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeParallelForSimdDirective(
      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(
            this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(),
            SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  // Support for isa<>/dyn_cast<> dispatch over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
  // The AST reader constructs empty nodes and fills the private state
  // directly during deserialization.
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum,
                                        unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass,
                         OMPD_target_teams_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum,
                                                 unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass,
                         OMPD_target_teams_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  // Support for isa<>/dyn_cast<> dispatch over Stmt nodes.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};
} // end namespace clang
#endif
|
build.c | #define _CRT_SECURE_NO_WARNINGS
#include <iso646.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define global static

// Print a string with its length, e.g. "[5] hello".
// Wrapped in do/while(0) so it behaves like a single statement (safe in
// unbraced if/else); %zu is the correct specifier for size_t (strlen).
#define DEBUG_STRING(String) do { printf("[%zu] %s\n", strlen(String), String); } while (0)
#define STRING_EQUAL(A, B)   (strcmp(A, B) == 0)

#define APP_NAME  "rtic"
#define CODE_FILE "../code/main.c"
// Supported compiler toolchains; TOOLCHAIN_COUNT is the number of entries
// and sizes the Strings command table.
typedef enum TOOLCHAIN {
    TOOLCHAIN_CLANG,
    TOOLCHAIN_GCC,
    TOOLCHAIN_MINGW,
    TOOLCHAIN_MSVC,
    TOOLCHAIN_PELLESC,
    TOOLCHAIN_COUNT,
} TOOLCHAIN;
// Segments of a compiler invocation, concatenated in this order by main();
// COMMAND_COUNT is the number of entries and sizes the Strings table.
typedef enum COMMAND_TYPE {
    COMMAND_COMPILER,
    COMMAND_CODE_FILE,
    COMMAND_OUTPUT,
    COMMAND_WARNINGS,
    COMMAND_FLAGS,
    COMMAND_DEBUG_FLAGS,
    COMMAND_RELEASE_FLAGS,
    COMMAND_LINKER,
    COMMAND_DEBUG_LINKER,
    COMMAND_RELEASE_LINKER,
    COMMAND_COUNT,
} COMMAND_TYPE;
// Per-toolchain command-line fragments, indexed [toolchain][command segment].
// Tentative definition so main() above the table can reference it; the
// initialized definition appears after main().
global char* Strings[TOOLCHAIN_COUNT][COMMAND_COUNT];
// Parses the command-line flags, concatenates the per-toolchain command
// fragments from the Strings table into one compiler invocation, runs it,
// and (with -e) runs the produced executable.
//
// Flags: -r release build, -e execute after build, -mold run the compiler
// through the mold linker, -<toolchain> select a toolchain (clang default).
// Any unrecognized flag prints usage and exits.
int main(int argc, char** argv) {
    // defaults
    bool Release = false;
    bool Execute = false;
    bool Mold = false;
    int Toolchain = TOOLCHAIN_CLANG;

    // argument parsing
    for (int a = 1; a < argc; ++a) {
        char* arg = argv[a];
        if (STRING_EQUAL("-r", arg)) {
            Release = true;
        } else
        if (STRING_EQUAL("-e", arg)) {
            Execute = true;
        } else
        if (STRING_EQUAL("-mold", arg)) {
            Mold = true;
        } else
        if (STRING_EQUAL("-clang", arg)) {
            Toolchain = TOOLCHAIN_CLANG;
        } else
        if (STRING_EQUAL("-gcc", arg)) {
            Toolchain = TOOLCHAIN_GCC;
        } else
        if (STRING_EQUAL("-mingw", arg)) {
            Toolchain = TOOLCHAIN_MINGW;
        } else
        if (STRING_EQUAL("-msvc", arg)) {
            Toolchain = TOOLCHAIN_MSVC;
        } else
        if (STRING_EQUAL("-pellesc", arg)) { // was a raw strcmp(); use the macro like every other flag
            Toolchain = TOOLCHAIN_PELLESC;
        } else
        {
            // Unknown flag: print usage and stop. The list now includes
            // pellesc, which is supported but was missing from the help text.
            printf(
                "build:\n"
                "\t-r release\n"
                "\t-e execute\n"
                "\t-mold use mold linker\n"
                "\t-<toolchain> [ clang* | gcc | msvc | mingw | pellesc ]\n"
                "\t (* default)\n"
            );
            return 0;
        }
    }

    // command building: every fragment starts with a leading space, so plain
    // concatenation produces a valid command line.
    char Command[0xFFF] = {0};
    if (Mold) strcat(Command, "mold --run ");
    strcat(Command, Strings[Toolchain][COMMAND_COMPILER ]);
    strcat(Command, Strings[Toolchain][COMMAND_CODE_FILE]);
    strcat(Command, Strings[Toolchain][COMMAND_OUTPUT   ]);
    strcat(Command, Strings[Toolchain][COMMAND_WARNINGS ]);
    strcat(Command, Strings[Toolchain][COMMAND_FLAGS    ]);
    strcat(Command, Strings[Toolchain][Release ? COMMAND_RELEASE_FLAGS  : COMMAND_DEBUG_FLAGS ]);
    strcat(Command, Strings[Toolchain][COMMAND_LINKER   ]);
    strcat(Command, Strings[Toolchain][Release ? COMMAND_RELEASE_LINKER : COMMAND_DEBUG_LINKER]);

    DEBUG_STRING(Command);
    system(Command);

    // HACK: this whole execute block is probably brittle
    if (Execute) {
#if defined(_WIN32)
        system(APP_NAME ".exe");
#else
        // On non-Windows hosts the mingw cross build still produces a .exe.
        if (Toolchain == TOOLCHAIN_MINGW) {
            system(APP_NAME ".exe");
        } else {
            system("./" APP_NAME);
        }
#endif
    }
    return 0;
}
// Initialized definition of the per-toolchain command table declared above.
// Every fragment begins with a leading space so main() can concatenate them.
global char* Strings[TOOLCHAIN_COUNT][COMMAND_COUNT] = {
    [TOOLCHAIN_CLANG] = {
        [COMMAND_COMPILER ] = ""
        " clang", // clang driver ("mold --run " is prepended separately when -mold is set)
        [COMMAND_CODE_FILE] = ""
        " " CODE_FILE,
        [COMMAND_OUTPUT] = ""
        " -o " APP_NAME          // output executable name
        ,
        [COMMAND_WARNINGS] = ""
        " -Werror"               // treat all warnings as errors
        " -Wall"                 // common warnings (despite the name, not literally all)
        " -Wno-missing-braces"   //
        " -Wno-unused-function"  //
        ,
        [COMMAND_FLAGS] = ""
        " -std=c17"              // C17 language standard
        " -pedantic"             // ISO conformance
        " -fno-gnu-keywords"     // disables gnu extensions
        ,
        [COMMAND_DEBUG_FLAGS] = ""
        " -O0"
        " -g"
        ,
        [COMMAND_RELEASE_FLAGS] = ""
        " -ffast-math"           // enables fast math
        " -O3"                   // level 3 optimisation
        //" -fopenmp"            // TODO: openmp seems to be a major cause of slowdown, especially when printing progress
        ,
        [COMMAND_LINKER] = ""
        " -L../libs/"            // add library path
        " -lm"                   // link to maths library
        ,
        [COMMAND_DEBUG_LINKER] = ""
        " -debug"                // NOTE(review): the clang driver does not document a '-debug'
                                 // flag; possibly '-g' or a '-Wl,...' option was intended — confirm
        ,
        [COMMAND_RELEASE_LINKER] = ""
        ,
    },
    [TOOLCHAIN_GCC] = {
        [COMMAND_COMPILER ] = ""
        " gcc", // gcc driver
        [COMMAND_CODE_FILE] = ""
        " " CODE_FILE,
        [COMMAND_OUTPUT] = ""
        " -o " APP_NAME          // output executable name
        ,
        [COMMAND_WARNINGS] = ""
        " -Werror"               // treat all warnings as errors
        " -Wall"                 // common warnings (despite the name, not literally all)
        " -Wno-missing-braces"   //
        " -Wno-unused-function"  //
        " -Wno-unknown-pragmas"  //
        ,
        [COMMAND_FLAGS] = ""
        " -std=c17"              // C17 language standard
        " -pedantic"             // ISO conformance
        ,
        [COMMAND_DEBUG_FLAGS] = ""
        " -O0"
        " -g"
        ,
        [COMMAND_RELEASE_FLAGS] = ""
        " -ffast-math"           // enables fast math
        " -O3"                   // level 3 optimisation
        //" -fopenmp"            // openmp support
        ,
        [COMMAND_LINKER] = ""
        " -L../libs/"            // add library path
        " -lm"                   // link to maths library
        ,
        [COMMAND_DEBUG_LINKER] = ""
        ,
        [COMMAND_RELEASE_LINKER] = ""
        ,
    },
    [TOOLCHAIN_MINGW] = {
        [COMMAND_COMPILER ] = ""
        " x86_64-w64-mingw32-gcc", // mingw-w64 cross compiler targeting 64-bit Windows
        [COMMAND_CODE_FILE] = ""
        " " CODE_FILE,
        [COMMAND_OUTPUT] = ""
        " -o " APP_NAME          // output executable name
        ,
        [COMMAND_WARNINGS] = ""
        " -Werror"               // treat all warnings as errors
        " -Wall"                 // common warnings (despite the name, not literally all)
        " -Wno-missing-braces"   //
        " -Wno-unused-function"  //
        " -Wno-unknown-pragmas"  //
        ,
        [COMMAND_FLAGS] = ""
        " -std=c17"              // C17 language standard
        " -pedantic"             // ISO conformance
        ,
        [COMMAND_DEBUG_FLAGS] = ""
        " -O0"
        " -g"
        ,
        [COMMAND_RELEASE_FLAGS] = ""
        " -ffast-math"           // enables fast math
        " -O3"                   // level 3 optimisation
        //" -fopenmp"            // openmp support
        ,
        [COMMAND_LINKER] = ""
        " -L../libs/"            // add library path
        " -lm"                   // link to maths library
        ,
        [COMMAND_DEBUG_LINKER] = ""
        ,
        [COMMAND_RELEASE_LINKER] = ""
        ,
    },
    [TOOLCHAIN_MSVC] = {
        [COMMAND_COMPILER] = ""
        " cl", // msvc
        [COMMAND_CODE_FILE] = ""
        " " CODE_FILE,
        [COMMAND_OUTPUT] = ""
        " -FAsu"                 // generate assembly with utf8 source
        " -Fa" APP_NAME          // output assembly name
        " -Fe" APP_NAME          // output executable name
        " -Fm" APP_NAME          // output map file name
        " -Fo" APP_NAME          // output object file name
        " -Zi"                   // generate debug info
        ,
        [COMMAND_WARNINGS] = ""
        " -WX"                   // treat all warnings as errors
        " -W4"                   // warning level 4
        " -wd4100"               // unreferenced formal parameter
        " -wd4101"               // unreferenced local variable
        " -wd4668"               // 'symbol' is not defined as a preprocessor macro, replacing with '0' for 'directives'
        " -wd4820"               // 'bytes' bytes padding added after construct 'member_name'
        " -wd5045"               // Compiler will insert Spectre mitigation for memory load if /Qspectre switch specified
        ,
        [COMMAND_FLAGS] = ""
        " -nologo"               // Suppresses display of sign-on banner.
        //" -analyze"            // Enables code analysis.
        " -EHa-"                 // Disables exception handling
        " -FC"                   // Displays the full path of source code files passed to cl.exe in diagnostic text.
        " -GL"                   // Enables whole program optimization.
        " -Gm-"                  // Disables minimal rebuild.
        " -GR-"                  // Disables run-time type information (RTTI).
        " -GS-"                  // Disables checking buffer security.
        " -Gw"                   // Enables whole-program global data optimization.
        " -Gy"                   // Enables function-level linking.
        " -std:c17"              // Enables ISO C17 conformance
        " -TC"                   // Specifies all source files are C.
        " -utf-8"                // Set source and execution character sets to UTF-8.
        ,
        [COMMAND_DEBUG_FLAGS] = ""
        " -fp:precise"           // Specifies how the compiler treats floating-point expressions, optimizations, and exceptions.
        " -MTd"                  // Compiles to create a debug multithreaded executable file, by using LIBCMTD.lib.
        " -Oi"                   // Generates intrinsic functions.
        ,
        [COMMAND_RELEASE_FLAGS] = ""
        " -fp:fast"              // Specifies how the compiler treats floating-point expressions, optimizations, and exceptions.
        " -MT"                   // Compiles to create a multithreaded executable file, by using LIBCMT.lib.
        " -O2"                   // Creates fast code.
        //" -openmp"             // Enables #pragma omp in source code.
        ,
        [COMMAND_LINKER] = ""
        " -link"                 // linker flag
        " -nologo"               // Suppresses the startup banner.
        " -incremental:no"       // Controls incremental linking.
        " -opt:ref,icf=4"        // Eliminates functions and data that are never referenced, perform identical COMDAT folding
        " -subsystem:console"    // Tells the operating system how to run the .exe file.
        " -libpath:../libs/"     // Specifies a path to search before the environmental library path.
        ,
        [COMMAND_DEBUG_LINKER] = ""
        " -debug:full"           // moves all private symbol information from individual compilation products (object files and libraries) into a single PDB
        ,
        [COMMAND_RELEASE_LINKER] = ""
        " -LTCG"                 // Specifies link-time code generation.
        ,
    },
    [TOOLCHAIN_PELLESC] = {
        [COMMAND_COMPILER] = ""
        " pocc"
        ,
        [COMMAND_CODE_FILE] = ""
        " " CODE_FILE
        ,
        [COMMAND_OUTPUT] = ""
        " -Fo" APP_NAME
        ,
        [COMMAND_WARNINGS] = ""
        " -W2"                   // Set warning level 0, 1 or 2 (default: 1)
        //" -Wd<n>"              // Disable warning #n
        ,
        [COMMAND_FLAGS] = ""
        " -arch:SSE2"            // Select X64 architecture AVX, AVX2, or SSE2 (default: SSE2)
        " -GT"                   // Generate fiber-safe access for thread local storage
        //" -I<path>"            // Add a search path for #include files
        //" -J"                  // Default char type is unsigned
        //" -MT"                 // Enable multi-threading support (CRTMT*.LIB)
        " -std:C17"              // Select language mode C17, C11 or C99 (default: C17)
        ,
        [COMMAND_DEBUG_FLAGS] = ""
        //" -fp:PRECISE"         // Set floating-point model PRECISE or FAST (default: PRECISE)
        " -Gi"                   // Enable trap for signed integer overflow
        " -Zi"                   // Enable full debugging information
        ,
        [COMMAND_RELEASE_FLAGS] = ""
        " -fp:FAST"              // Set floating-point model PRECISE or FAST (default: PRECISE)
        //" -openmp"             // Enable OpenMP 3.1 extensions
        " -Ot"                   // Optimise (favouring: Os/Ot - space/speed)
        " -Ox"                   // Perform maximum optimisations
        ,
        [COMMAND_LINKER] = ""
        ,
        [COMMAND_DEBUG_LINKER] = ""
        ,
        [COMMAND_RELEASE_LINKER] = ""
        ,
    },
};
|
monodomain_solver.c | //
// Created by sachetto on 03/10/17.
//
#include "monodomain_solver.h"
#include "../utils/logfile_utils.h"
#include "../utils/stop_watch.h"
#ifdef COMPILE_CUDA
#include "../gpu_utils/gpu_utils.h"
#endif
#ifdef COMPILE_OPENGL
#include "../draw/draw.h"
#endif
#include <assert.h>
#include <inttypes.h>
#include "../string/sds.h"
#include "config/domain_config.h"
#include "config/purkinje_config.h"
#include "config/assembly_matrix_config.h"
#include "config/linear_system_solver_config.h"
// Finite-volume scaling factor for a cubic cell of edge length h (in um):
// alpha = (beta * cm / dt) * UM2_TO_CM2 * h^3.
// h*h*h replaces pow(h, 3.0): pow is a general transcendental and is far
// slower for a small integer exponent, and this runs per-cell per-step.
static inline double ALPHA (double beta, double cm, double dt, double h) {
    return (((beta * cm) / dt) * UM2_TO_CM2) * (h * h * h);
}
struct monodomain_solver *new_monodomain_solver () {
struct monodomain_solver *result = (struct monodomain_solver *)malloc (sizeof (struct monodomain_solver));
result->beta = 0.14;
result->cm = 1.0;
return result;
}
// Runs the full monodomain simulation: initializes the configured plugin
// functions (stimuli, Purkinje domain, matrix assembly, linear solver,
// optional extra data), assembles the system matrix, then time-steps the
// coupled cell-model ODEs and tissue PDE until final_time, writing results
// and timing statistics along the way.
void solve_monodomain (struct monodomain_solver *the_monodomain_solver, struct ode_solver *the_ode_solver,
                       struct grid *the_grid, struct user_options *configs) {

    assert (configs);
    assert (the_grid);
    assert (the_monodomain_solver);
    assert (the_ode_solver);

#ifdef COMPILE_OPENGL
    if(configs->draw) {
        grid_to_draw = the_grid;
    }
#endif

    print_to_stdout_and_file (LOG_LINE_SEPARATOR);

    // Accumulated timings (microseconds, per the stop_watch prints below).
    long ode_total_time = 0, cg_total_time = 0, total_write_time = 0, total_mat_time = 0, total_ref_time = 0,
         total_deref_time = 0, cg_partial, total_config_time = 0;

    uint32_t total_cg_it = 0;

    struct stop_watch solver_time, ode_time, cg_time, part_solver, part_mat, write_time, ref_time, deref_time,
        config_time;

    init_stop_watch (&config_time);

    start_stop_watch (&config_time);

    ///////MAIN CONFIGURATION BEGIN//////////////////
    init_ode_solver_with_cell_model (the_ode_solver);
    struct stim_config_hash *stimuli_configs = configs->stim_configs;
    struct extra_data_config *extra_data_config = configs->extra_data_config;
    //struct domain_config *domain_config = configs->domain_config;
    struct purkinje_config *purkinje_config = configs->purkinje_config;
    struct assembly_matrix_config *assembly_matrix_config = configs->assembly_matrix_config;
    struct linear_system_solver_config *linear_system_solver_config = configs->linear_system_solver_config;

    bool has_extra_data = (extra_data_config != NULL);

    double last_stimulus_time = -1.0;

    if (stimuli_configs)
    {
        // Init all stimuli
        STIM_CONFIG_HASH_FOR_EACH_KEY_APPLY_FN_IN_VALUE_AND_KEY (stimuli_configs, init_stim_functions);

        //Find last stimuli
        // (walks every hash bucket; last_stimulus_time is the latest
        // stim_start + stim_duration over all configured stimuli)
        size_t s_size = stimuli_configs->size;
        double s_end;
        for (int i = 0; i < s_size; i++)
        {
            for (struct stim_config_elt *e = stimuli_configs->table[i % s_size]; e != 0; e = e->next)
            {
                s_end = e->value->stim_start + e->value->stim_duration;
                if(s_end > last_stimulus_time) last_stimulus_time = s_end;
            }
        }
    }

    // Configure the functions and set the mesh domain
    /*
    if (domain_config)
    {
        init_domain_functions (domain_config);
        domain_config->set_spatial_domain (domain_config, the_grid);
    }
    else
    {
        print_to_stdout_and_file ("No domain configuration provided! Exiting!\n");
        exit (EXIT_FAILURE);
    }
    */

    // Configure the functions and set the Purkinje mesh domain
    // (this build requires a Purkinje configuration; the regular tissue
    // domain path above is commented out)
    if (purkinje_config)
    {
        init_purkinje_functions(purkinje_config);
        purkinje_config->set_spatial_purkinje(purkinje_config,the_grid);
    }
    else
    {
        print_to_stdout_and_file ("No Purkinje configuration provided! Exiting!\n");
        exit (EXIT_FAILURE);
    }

    if (assembly_matrix_config)
    {
        init_assembly_matrix_functions (assembly_matrix_config);
    }
    else
    {
        print_to_stdout_and_file ("No assembly matrix configuration provided! Exiting!\n");
        exit (EXIT_FAILURE);
    }

    if (linear_system_solver_config)
    {
        init_linear_system_solver_functions (linear_system_solver_config);
    }
    else
    {
        print_to_stdout_and_file ("No linear solver configuration provided! Exiting!\n");
        exit (EXIT_FAILURE);
    }

    if (has_extra_data)
    {
        init_extra_data_functions (extra_data_config);
    }
    ///////MAIN CONFIGURATION END//////////////////

    //int refine_each = the_monodomain_solver->refine_each;
    //int derefine_each = the_monodomain_solver->derefine_each;

    //bool redo_matrix;
    bool activity;

    bool gpu = the_ode_solver->gpu;

    int count = 0;

    //double refinement_bound = the_monodomain_solver->refinement_bound;
    //double derefinement_bound = the_monodomain_solver->derefinement_bound;

    //double start_h = domain_config->start_h;
    //double max_h = domain_config->max_h;
    //double start_h = purkinje_config->start_h;
    //double max_h = purkinje_config->start_h;

    bool adaptive = the_grid->adaptive;
    //double start_adpt_at = the_monodomain_solver->start_adapting_at;
    bool save_to_file = (configs->out_dir_name != NULL);

    // dt_edp: PDE (tissue) time step; dt_edo: ODE (cell model) time step.
    double dt_edp = the_monodomain_solver->dt;
    double finalT = the_monodomain_solver->final_time;

    double beta = the_monodomain_solver->beta;
    double cm = the_monodomain_solver->cm;

    double dt_edo = the_ode_solver->min_dt;

#ifdef COMPILE_CUDA
    if (gpu) {
        int device_count;
        int device = the_ode_solver->gpu_id;
        check_cuda_errors (cudaGetDeviceCount (&device_count));
        struct cudaDeviceProp prop;
        check_cuda_errors (cudaGetDeviceProperties (&prop, the_ode_solver->gpu_id));
        print_to_stdout_and_file ("%d devices available, running on Device %d: %s\n", device_count, device, prop.name);
        check_cuda_errors (cudaSetDevice (device));
    }
#endif

    order_grid_cells (the_grid);
    // Remember the initial cell count and positions so ODE state can be
    // re-associated with cells after (de)refinement.
    uint32_t original_num_cells = the_grid->num_active_cells;

    save_old_cell_positions (the_grid);

    if (adaptive)
    {
        update_cells_to_solve (the_grid, the_ode_solver);
    }

    print_to_stdout_and_file ("Setting ODE's initial conditions\n");
    set_ode_initial_conditions_for_all_volumes (the_ode_solver, the_grid->num_active_cells);

    double initial_v = the_ode_solver->model_data.initial_v;

    total_config_time = stop_stop_watch (&config_time);

    print_solver_info (the_monodomain_solver, the_ode_solver, the_grid, configs);

    // Number of ODE sub-steps per PDE step; the ODE step must not exceed
    // the PDE step, otherwise the PDE step is shrunk to match.
    int ode_step = 1;

    if (dt_edp >= dt_edo)
    {
        ode_step = (int)(dt_edp / dt_edo);
        print_to_stdout_and_file ("Solving EDO %d times before solving PDE\n", ode_step);
    }
    else
    {
        print_to_stdout_and_file ("WARNING: EDO time step is greater than PDE time step. Adjusting to EDO time "
                                  "step: %lf\n",
                                  dt_edo);
        dt_edp = dt_edo;
    }

    fflush (stdout);

    init_stop_watch (&solver_time);
    init_stop_watch (&ode_time);
    init_stop_watch (&cg_time);
    init_stop_watch (&part_solver);
    init_stop_watch (&part_mat);
    init_stop_watch (&write_time);
    init_stop_watch (&ref_time);
    init_stop_watch (&deref_time);

    print_to_stdout_and_file ("Assembling Monodomain Matrix Begin\n");
    start_stop_watch (&part_mat);
    set_initial_conditions_all_volumes (the_monodomain_solver, the_grid, initial_v);
    assembly_matrix_config->assembly_matrix(assembly_matrix_config, the_monodomain_solver, the_grid);
    total_mat_time = stop_stop_watch (&part_mat);
    print_to_stdout_and_file ("Assembling Monodomain Matrix End\n");

    print_to_stdout_and_file (LOG_LINE_SEPARATOR);

    start_stop_watch (&solver_time);

    int print_rate = configs->print_rate;

    bool abort_on_no_activity = the_monodomain_solver->abort_on_no_activity;
    double solver_error;
    uint32_t solver_iterations = 0;

    if (stimuli_configs)
        set_spatial_stim(stimuli_configs, the_grid);

    if (has_extra_data)
        set_ode_extra_data (extra_data_config, the_grid, the_ode_solver);

    bool save_in_binary = configs->binary;

    double cur_time = 0.0;

    // Main time-stepping loop: write output -> sync V into the ODE state ->
    // ODE solve -> build RHS -> linear (CG) solve -> advance time.
    print_to_stdout_and_file ("Starting simulation\n");
    while (cur_time <= finalT)
    {
#ifdef COMPILE_OPENGL
        redraw = count % print_rate == 0; //redraw grid
#endif
        if (save_to_file)
        {
            if (count % print_rate == 0)
            {
                start_stop_watch (&write_time);

                activity = print_result(the_grid, configs, count, save_in_binary);

                total_write_time += stop_stop_watch (&write_time);

                if (abort_on_no_activity)
                {
                    if (!activity)
                    {
                        print_to_stdout_and_file ("No activity, aborting simulation\n");
                        break;
                    }
                }
            }
        }

        // Skip the state-vector sync on the very first step: the ODE state
        // was just set from the initial conditions.
        if (cur_time > 0.0)
        {
            update_ode_state_vector (the_ode_solver, the_grid, original_num_cells);
        }

        start_stop_watch (&ode_time);

        solve_all_volumes_odes (the_ode_solver, the_grid->num_active_cells, cur_time, ode_step, stimuli_configs);

        update_monodomain (original_num_cells, the_grid->num_active_cells, the_grid->active_cells, beta, cm, dt_edp,
                           the_ode_solver->sv, the_ode_solver->model_data.number_of_ode_equations, gpu);

        ode_total_time += stop_stop_watch (&ode_time);

        start_stop_watch (&cg_time);

        linear_system_solver_config->solve_linear_system(linear_system_solver_config, the_grid, &solver_iterations, &solver_error);

        cg_partial = stop_stop_watch (&cg_time);

        cg_total_time += cg_partial;

        total_cg_it += solver_iterations;

        if (count % print_rate == 0)
        {
            print_to_stdout_and_file ("t = %lf, Iterations = "
                                      "%" PRIu32 ", Error Norm = %e, Number of Cells:"
                                      "%" PRIu32 ", Iterations time: %ld us\n",
                                      cur_time, solver_iterations, solver_error, the_grid->num_active_cells, cg_partial);
        }

        count++;
        cur_time += dt_edp;
    }

    // Final timing summary.
    print_to_stdout_and_file ("Resolution Time: %ld μs\n", stop_stop_watch (&solver_time));
    print_to_stdout_and_file ("ODE Total Time: %ld μs\n", ode_total_time);
    print_to_stdout_and_file ("CG Total Time: %ld μs\n", cg_total_time);
    print_to_stdout_and_file ("Mat time: %ld μs\n", total_mat_time);
    print_to_stdout_and_file ("Refine time: %ld μs\n", total_ref_time);
    print_to_stdout_and_file ("Derefine time: %ld μs\n", total_deref_time);
    print_to_stdout_and_file ("Write time: %ld μs\n", total_write_time);
    print_to_stdout_and_file ("Initial configuration time: %ld μs\n", total_config_time);
    print_to_stdout_and_file ("CG Total Iterations: %u\n", total_cg_it);
}
// Writes the current voltage field to <out_dir_name>/V_t_<count> and returns
// whether the grid still shows electrical activity (used by the
// abort_on_no_activity check). Returns false if the file cannot be opened,
// instead of dereferencing a NULL FILE* as the original did.
bool print_result(const struct grid *the_grid, const struct user_options *configs, int count, bool save_in_binary) {
    bool activity = false;

    sds tmp = sdsnew (configs->out_dir_name);
    sds c = sdsfromlonglong (count);
    tmp = sdscat (tmp, "/V_t_");
    tmp = sdscat (tmp, c);

    FILE *f1 = fopen (tmp, "w");
    if (f1 != NULL) {
        activity = print_grid_and_check_for_activity (the_grid, f1, count, save_in_binary);
        fclose (f1);
    } else {
        print_to_stdout_and_file ("Error opening %s for writing!\n", tmp);
    }

    sdsfree (tmp);
    sdsfree (c);
    return activity;
}
// Walks every bucket of the stimulus hash and applies each configured
// stimulus to the grid via its set_spatial_stim plugin function.
void set_spatial_stim(struct stim_config_hash *stim_configs, struct grid *the_grid) {
    int n_buckets = stim_configs->size;

    for (int bucket = 0; bucket < n_buckets; bucket++) {
        struct stim_config_elt *e = stim_configs->table[bucket % n_buckets];
        while (e != NULL) {
            struct stim_config *config = e->value;
            config->set_spatial_stim (config, the_grid);
            e = e->next;
        }
    }
}
// Replaces the ODE solver's extra-data blob with one produced by the
// configured set_extra_data plugin function, which also reports the new
// size through extra_data_size.
void set_ode_extra_data (struct extra_data_config *config, struct grid *the_grid, struct ode_solver *the_ode_solver) {

    // Release any previous blob before overwriting the pointer
    // (free(NULL) is a no-op, so a first call is safe).
    free (the_ode_solver->edo_extra_data);

    the_ode_solver->edo_extra_data =
        config->set_extra_data (the_grid, config->config_data.config, &(the_ode_solver->extra_data_size));
}
// Copies each active cell's transmembrane voltage back into the ODE
// solver's state vector, so the next ODE solve starts from the voltage
// produced by the PDE step.
void update_ode_state_vector (struct ode_solver *the_ode_solver, struct grid *the_grid, uint32_t max_number_of_cells) {

    uint32_t n_active = the_grid->num_active_cells;
    struct cell_node **ac = the_grid->active_cells;

    int n_edos = the_ode_solver->model_data.number_of_ode_equations;

    real *sv = the_ode_solver->sv;

    int i;

    if (the_ode_solver->gpu) {
#ifdef COMPILE_CUDA
        // GPU path: stage the device state on the host, overwrite the
        // voltage entries, and copy back. mem_size is sized from
        // max_number_of_cells (the original cell count) and indexed by
        // sv_position — assumes the device buffer stores one voltage per
        // sv_position slot; TODO confirm against the GPU cell-model layout.
        real *vms;
        size_t mem_size = max_number_of_cells * sizeof (real);

        vms = (real *)malloc (mem_size);
        check_cuda_errors (cudaMemcpy (vms, sv, mem_size, cudaMemcpyDeviceToHost));

#pragma omp parallel for
        for (i = 0; i < n_active; i++) {
            vms[ac[i]->sv_position] = (real)ac[i]->v;
        }

        check_cuda_errors (cudaMemcpy (sv, vms, mem_size, cudaMemcpyHostToDevice));
        free (vms);
#endif
    } else {
        // CPU path: V occupies the first of the n_edos state variables
        // stored contiguously per cell.
#pragma omp parallel for
        for (i = 0; i < n_active; i++) {
            sv[ac[i]->sv_position * n_edos] = (real)ac[i]->v;
        }
    }
}
// Records each active cell's current grid position as its state-vector
// position, so ODE state can be located for that cell after the grid is
// reordered or adapted.
void save_old_cell_positions (struct grid *the_grid)
{
    struct cell_node **active = the_grid->active_cells;
    uint32_t total = the_grid->num_active_cells;

    int idx;
#pragma omp parallel for
    for (idx = 0; idx < total; idx++)
    {
        active[idx]->sv_position = active[idx]->grid_position;
    }
}
void update_cells_to_solve (struct grid *the_grid, struct ode_solver *solver) {
uint32_t n_active = the_grid->num_active_cells;
struct cell_node **ac = the_grid->active_cells;
if (solver->cells_to_solve) {
free (solver->cells_to_solve);
}
solver->cells_to_solve = (uint32_t *)malloc (the_grid->num_active_cells * sizeof (uint32_t));
uint32_t *cts = solver->cells_to_solve;
int i;
#pragma omp parallel for
for (i = 0; i < n_active; i++) {
cts[i] = ac[i]->sv_position;
}
}
// Sets the initial transmembrane voltage on every active cell and seeds the
// linear-system right-hand side b with the matching finite-volume scaling
// (alpha depends on each cell's face length).
void set_initial_conditions_all_volumes (struct monodomain_solver *the_solver, struct grid *the_grid, double initial_v)
{
    struct cell_node **cells = the_grid->active_cells;
    uint32_t n_cells = the_grid->num_active_cells;

    double beta = the_solver->beta;
    double cm = the_solver->cm;
    double dt = the_solver->dt;

    double alpha, h;
    int idx;
#pragma omp parallel for private(alpha, h)
    for (idx = 0; idx < n_cells; idx++)
    {
        h = cells[idx]->face_length;
        alpha = ALPHA (beta, cm, dt, h);

        cells[idx]->v = initial_v;
        cells[idx]->b = initial_v * alpha;
    }
}
// Builds the right-hand side b of the linear system from the voltages
// produced by the ODE solve: b[i] = V[i] * alpha(h_i). When running on the
// GPU the state vector lives in device memory, so it is first staged into a
// host buffer (vms); that allocation is now checked instead of passing a
// possible NULL to cudaMemcpy.
void update_monodomain (uint32_t initial_number_of_cells, uint32_t num_active_cells, struct cell_node **active_cells,
                        double beta, double cm, double dt_edp, real *sv, int n_equations_cell_model, bool use_gpu)
{
    double h, alpha;

#ifdef COMPILE_CUDA
    real *vms = NULL;
    size_t mem_size = initial_number_of_cells * sizeof (real);

    if (use_gpu)
    {
        vms = (real *)malloc (mem_size);
        if (vms == NULL)
        {
            print_to_stdout_and_file ("Failed to allocate host buffer in update_monodomain! Exiting!\n");
            exit (EXIT_FAILURE);
        }
        check_cuda_errors (cudaMemcpy (vms, sv, mem_size, cudaMemcpyDeviceToHost));
    }
#endif

    int i;
#pragma omp parallel for private(h, alpha)
    for (i = 0; i < num_active_cells; i++)
    {
        h = active_cells[i]->face_length;
        alpha = ALPHA (beta, cm, dt_edp, h);

        if (use_gpu)
        {
#ifdef COMPILE_CUDA
            // Device layout: one voltage per sv_position slot — mirrors the
            // indexing used by update_ode_state_vector's GPU path.
            active_cells[i]->b = vms[active_cells[i]->sv_position] * alpha;
#endif
        }
        else
        {
            // CPU layout: V is the first of n_equations_cell_model state
            // variables stored per cell.
            active_cells[i]->b = sv[active_cells[i]->sv_position * n_equations_cell_model] * alpha;
        }
    }

#ifdef COMPILE_CUDA
    // free(NULL) is a no-op on the CPU path.
    free (vms);
#endif
}
// Print a human-readable summary of the whole simulation setup (solver, ODE
// model, grid, output, stimuli, domain, Purkinje and extra-data sections) to
// stdout and to the log file.
//
// Bug fix: the "Extra data" section previously chose its singular/plural
// header by inspecting options->domain_config->config_data.config->n (a
// copy-paste from the domain section). That printed the wrong header whenever
// the two configs had different parameter counts, and dereferenced a NULL
// pointer when extra_data_config was set but domain_config was not. It now
// inspects options->extra_data_config->config_data.config->n.
void print_solver_info (struct monodomain_solver *the_monodomain_solver, struct ode_solver *the_ode_solver,
                        struct grid *the_grid, struct user_options *options) {
    print_to_stdout_and_file ("System parameters: \n");
#if defined(_OPENMP)
    print_to_stdout_and_file ("Using OpenMP with %d threads\n", omp_get_max_threads ());
#endif
    if (the_ode_solver->gpu) {
        print_to_stdout_and_file ("Using GPU to solve ODEs\n");
    }

    // ODE model parameters
    print_to_stdout_and_file ("Initial V: %lf\n", the_ode_solver->model_data.initial_v);
    print_to_stdout_and_file ("Number of ODEs in cell model: %d\n", the_ode_solver->model_data.number_of_ode_equations);

    // PDE / linear system parameters
    print_to_stdout_and_file ("Sigma X = %.10lf, Sigma Y = %.10lf, Sigma Z = %.10lf\n", the_monodomain_solver->sigma_x,
                              the_monodomain_solver->sigma_y, the_monodomain_solver->sigma_z);
    print_to_stdout_and_file ("Beta = %.10lf, Cm = %.10lf\n", the_monodomain_solver->beta, the_monodomain_solver->cm);
    print_to_stdout_and_file ("Initial N. of Elements = "
                              "%" PRIu32 "\n",
                              the_grid->num_active_cells);
    print_to_stdout_and_file ("PDE time step = %lf\n", the_monodomain_solver->dt);
    print_to_stdout_and_file ("ODE min time step = %lf\n", the_ode_solver->min_dt);
    print_to_stdout_and_file ("Simulation Final Time = %lf\n", the_monodomain_solver->final_time);
    print_to_stdout_and_file ("Maximum CG iterations = %d\n", the_monodomain_solver->max_iterations);
    print_to_stdout_and_file ("CG tolerance = %e\n", the_monodomain_solver->tolerance);
    if (the_monodomain_solver->use_jacobi) {
        print_to_stdout_and_file ("Using Jacobi preconditioner\n");
    }

    // Adaptivity parameters (only meaningful on adaptive grids)
    if (the_grid->adaptive) {
        print_to_stdout_and_file ("Using adaptativity\n");
        print_to_stdout_and_file ("Refinement Bound = %lf\n", the_monodomain_solver->refinement_bound);
        print_to_stdout_and_file ("Derefinement Bound = %lf\n", the_monodomain_solver->derefinement_bound);
        print_to_stdout_and_file ("Refining each %d time steps\n", the_monodomain_solver->refine_each);
        print_to_stdout_and_file ("Derefining each %d time steps\n", the_monodomain_solver->derefine_each);
    }

    // Output settings
    print_to_stdout_and_file ("Print Rate = %d\n", options->print_rate);
    if (options->out_dir_name != NULL) {
        if (options->binary) {
            print_to_stdout_and_file ("Saving using binary output in %s dir\n", options->out_dir_name);
        } else {
            print_to_stdout_and_file ("Saving to plain text output in %s dir\n", options->out_dir_name);
        }
    } else {
        print_to_stdout_and_file ("The solution will not be saved\n");
    }

    // Stimuli: walk every bucket of the stim_configs hash table
    if (options->stim_configs) {
        print_to_stdout_and_file (LOG_LINE_SEPARATOR);
        if (options->stim_configs->size == 1) {
            print_to_stdout_and_file ("Stimulus configuration:\n");
        } else {
            print_to_stdout_and_file ("Stimuli configuration:\n");
        }
        for (int i = 0; i < options->stim_configs->size; i++) {
            for (struct stim_config_elt *e = options->stim_configs->table[i % options->stim_configs->size]; e != 0;
                 e = e->next) {
                print_to_stdout_and_file ("Stimulus name: %s\n", e->key);
                print_to_stdout_and_file ("Stimulus start: %lf\n", e->value->stim_start);
                print_to_stdout_and_file ("Stimulus duration: %lf\n", e->value->stim_duration);
                print_to_stdout_and_file ("Stimulus current: %lf\n", e->value->stim_current);
                print_to_stdout_and_file ("Stimulus library: %s\n", e->value->config_data.library_file_path);
                print_to_stdout_and_file ("Stimulus function: %s\n", e->value->config_data.function_name);
                struct string_hash *tmp = e->value->config_data.config;
                if (tmp->n == 1) {
                    print_to_stdout_and_file ("Stimulus extra parameter:\n");
                } else if (tmp->n > 1) {
                    print_to_stdout_and_file ("Stimulus extra parameters:\n");
                }
                STRING_HASH_PRINT_KEY_VALUE_LOG (tmp);
                print_to_stdout_and_file (LOG_LINE_SEPARATOR);
            }
        }
    }

    // Domain section
    if (options->domain_config) {
        print_to_stdout_and_file ("Domain configuration:\n");
        print_to_stdout_and_file ("Domain name: %s\n", options->domain_config->domain_name);
        print_to_stdout_and_file ("Domain initial Space Discretization: %lf um\n", options->domain_config->start_h);
        if (the_grid->adaptive) {
            print_to_stdout_and_file ("Domain maximum Space Discretization: %lf um\n", options->domain_config->max_h);
            print_to_stdout_and_file ("The adaptivity will start in time: %lf ms\n",
                                      the_monodomain_solver->start_adapting_at);
        }
        if (options->domain_config->config_data.config->n == 1) {
            print_to_stdout_and_file ("Domain extra parameter:\n");
        } else if (options->domain_config->config_data.config->n > 1) {
            print_to_stdout_and_file ("Domain extra parameters:\n");
        }
        STRING_HASH_PRINT_KEY_VALUE_LOG (options->domain_config->config_data.config);
        print_to_stdout_and_file (LOG_LINE_SEPARATOR);
    }

    // Purkinje section
    if (options->purkinje_config) {
        print_to_stdout_and_file ("Purkinje configuration:\n");
        print_to_stdout_and_file ("Purkinje network name: %s\n", options->purkinje_config->domain_name);
        print_to_stdout_and_file ("Purkinje network initial Space Discretization: %lf um\n", options->purkinje_config->start_h);
        if (options->purkinje_config->config_data.config->n == 1) {
            print_to_stdout_and_file ("Purkinje extra parameter:\n");
        } else if (options->purkinje_config->config_data.config->n > 1) {
            print_to_stdout_and_file ("Purkinje extra parameters:\n");
        }
        STRING_HASH_PRINT_KEY_VALUE_LOG (options->purkinje_config->config_data.config);
        print_to_stdout_and_file (LOG_LINE_SEPARATOR);
    }

    // Extra-data section
    if (options->extra_data_config) {
        print_to_stdout_and_file ("Extra data ODE function configuration:\n");
        print_to_stdout_and_file ("Extra data library: %s\n",
                                  options->extra_data_config->config_data.library_file_path);
        print_to_stdout_and_file ("Extra data function: %s\n", options->extra_data_config->config_data.function_name);
        // FIX: inspect extra_data_config (not domain_config) to pick the header.
        if (options->extra_data_config->config_data.config->n == 1) {
            print_to_stdout_and_file ("Extra data parameter:\n");
        } else if (options->extra_data_config->config_data.config->n > 1) {
            print_to_stdout_and_file ("Extra data parameters:\n");
        }
        STRING_HASH_PRINT_KEY_VALUE_LOG (options->extra_data_config->config_data.config);
        print_to_stdout_and_file (LOG_LINE_SEPARATOR);
    }
}
// Copy the parsed user options into the monodomain solver structure.
// Pure field-by-field assignment; no validation beyond the non-NULL asserts.
void configure_monodomain_solver_from_options (struct monodomain_solver *the_monodomain_solver,
                                               struct user_options *options) {
    assert (the_monodomain_solver);
    assert (options);

    // Linear system (CG) settings
    the_monodomain_solver->tolerance = options->cg_tol;
    the_monodomain_solver->max_iterations = options->max_its;
    the_monodomain_solver->use_jacobi = options->use_jacobi;

    // Execution settings
    the_monodomain_solver->num_threads = options->num_threads;
    the_monodomain_solver->abort_on_no_activity = options->abort_no_activity;

    // Time discretisation
    the_monodomain_solver->final_time = options->final_time;
    the_monodomain_solver->dt = options->dt_edp;

    // Mesh adaptivity
    the_monodomain_solver->refine_each = options->refine_each;
    the_monodomain_solver->derefine_each = options->derefine_each;
    the_monodomain_solver->refinement_bound = options->ref_bound;
    the_monodomain_solver->derefinement_bound = options->deref_bound;
    the_monodomain_solver->start_adapting_at = options->start_adapting_at;

    // Tissue properties
    the_monodomain_solver->sigma_x = options->sigma_x;
    the_monodomain_solver->sigma_y = options->sigma_y;
    the_monodomain_solver->sigma_z = options->sigma_z;
    the_monodomain_solver->beta = options->beta;
    the_monodomain_solver->cm = options->cm;
}
|
DRB037-truedepseconddimension-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be parallelized in this program.
The inner loop has true dependence.
Data race pair: b[i][j]@63:7 vs. b[i][j-1]@63:15
*/
#include <stdlib.h>
#include <stdio.h>
double b[1000][1000];
int main(int argc, char* argv[])
{
  int i,j;
  int n=1000, m=1000;

  // Initialisation: the written value depends only on (i, j), so there is no
  // dependence between iterations at either loop level.
#pragma omp parallel for private(j)
  for (i=0;i<n;i++)
#pragma omp parallel for simd
    for (j=1;j<m;j++)
      b[i][j]= i * m + j;

  // INTENTIONAL DATA RACE — this is a DataRaceBench "yes" test case. As the
  // file header states, the inner j loop carries a true dependence
  // (b[i][j] reads b[i][j-1] written by the previous j iteration), so only
  // the outermost i loop may be parallelised; the simd clause here races.
  // Do NOT "fix" this: the race is the point of the benchmark.
#pragma omp parallel for simd private(j)
  for (i=0;i<n;i++)
    for (j=1;j<m;j++)
      b[i][j]=b[i][j-1];

  // Print the result; the ordered clauses keep the output in sequential order.
#pragma omp parallel for private(j) ordered
  for (i=0;i<n;i++)
#pragma omp parallel for simd ordered
    for (j=1;j<m;j++)
#pragma omp ordered simd
      printf("%lf\n",b[i][j]);
  return 0;
}
|
struct_axpy.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.10 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Structured axpy routine
*
*****************************************************************************/
#include "_hypre_struct_mv.h"
/*--------------------------------------------------------------------------
* hypre_StructAxpy
*--------------------------------------------------------------------------*/
/*
 * hypre_StructAxpy: compute y := y + alpha * x for structured vectors,
 * box by box over the grid of y.
 *
 * The iteration itself is generated by the hypre_BoxLoop2Begin/For/End
 * macros, which expand to nested loops over loop_size with two running data
 * indices (xi into x's data box, yi into y's data box); the OpenMP pragma
 * between Begin and For parallelises the macro-generated outer loop.
 * Assumes x and y live on compatible grids (same boxes) — the box array is
 * taken from y and the matching data boxes are looked up in both vectors.
 */
HYPRE_Int
hypre_StructAxpy( double alpha,
                  hypre_StructVector *x,
                  hypre_StructVector *y )
{
   hypre_Box *x_data_box;              /* data extents of x on the current box */
   hypre_Box *y_data_box;              /* data extents of y on the current box */
   HYPRE_Int xi;                       /* running data index into xp */
   HYPRE_Int yi;                       /* running data index into yp */
   double *xp;
   double *yp;
   hypre_BoxArray *boxes;
   hypre_Box *box;
   hypre_Index loop_size;
   hypre_IndexRef start;
   hypre_Index unit_stride;
   HYPRE_Int i;

   hypre_SetIndex(unit_stride, 1, 1, 1); /* dense traversal: stride 1 in x, y, z */
   boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y));
   hypre_ForBoxI(i, boxes)
   {
      box = hypre_BoxArrayBox(boxes, i);
      start = hypre_BoxIMin(box);
      x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
      y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);
      xp = hypre_StructVectorBoxData(x, i);
      yp = hypre_StructVectorBoxData(y, i);
      hypre_BoxGetSize(box, loop_size);
      hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
                          x_data_box, start, unit_stride, xi,
                          y_data_box, start, unit_stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,yi) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop2For(xi, yi)
      {
         yp[yi] += alpha * xp[xi];   /* the actual axpy update */
      }
      hypre_BoxLoop2End(xi, yi);
   }
   return hypre_error_flag;
}
|
b05c988_so12.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
// Generic data carrier passed to the generated Kernel(): a raw pointer plus
// per-dimension descriptor arrays. Only `data` and `size` are used in this
// file (size feeds the VLA casts at the top of Kernel).
// NOTE(review): the remaining field meanings are inferred from the generator's
// naming, not from code visible here — confirm against the generating tool.
struct dataobj
{
  void *restrict data;  // base pointer to the array storage
  int *size;            // extent per dimension (used for the array casts)
  int *npsize;          // presumably extent without padding — TODO confirm
  int *dsize;           // presumably domain extent — TODO confirm
  int *hsize;           // presumably halo extent — TODO confirm
  int *hofs;            // presumably halo offsets — TODO confirm
  int *oofs;            // presumably owned-region offsets — TODO confirm
};
// Wall-clock accumulators for the timed code sections of Kernel();
// section0 is the only section timed in this file.
struct profiler
{
  double section0;  // seconds accumulated around the main time-stepping loop
};
// Auto-generated (Devito-style) time-tiled acoustic wavefield update.
// Advances usol with a high-order spatial stencil (half-width 6: neighbour
// offsets span +6..+18 around the +12-padded centre in each axis), damping
// (damp), a velocity model (vp), and sparse source injection driven by the
// source-mask arrays. Three time levels of usol are rotated via t0/t1/t2.
// The x/y index expressions are skewed by `time` (time-tiling/skewing), and
// `sf` is the skew factor per time step. Returns 0; elapsed wall time is
// accumulated into timers->section0.
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
  // Re-interpret the flat dataobj buffers as multi-dimensional VLA pointers,
  // using the sizes recorded in each descriptor.
  int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data;
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_vec->size[1]])save_src_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  float(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  float(*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float(*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  // Tile/block geometry, supplied by the caller via block_sizes.
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];
  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", x0_blk0_size, y0_blk0_size, xb_size, yb_size);
  int sf = 6;  // skew factor per time step — presumably half the space order; TODO confirm
  int t_blk_size = 2 * sf * (time_M - time_m);
  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  // Time-block loop; xb/yb iterate over skewed spatial tiles whose ranges are
  // extended by sf*(time_M - time_m) to cover the skewing.
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        // `time` advances by sf within the block; t0/t1/t2 rotate the three
        // stored time levels of usol (next / current / previous).
        for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1));
          /* Begin section0 */
#pragma omp parallel num_threads(nthreads)
          {
            // Inner cache blocks within the current tile; bounds are clipped
            // to both the (time-skewed) domain and the tile edges.
#pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
              {
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
                    // Vectorised stencil update along z: r4 = 1/vp^2,
                    // r5 = 1/dt^2, r6 = 1/dt, r7 = centre-coefficient term.
#pragma omp simd aligned(damp, usol, vp : 32)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      float r7 = -2.98277778F * usol[t1][x - time + 12][y - time + 12][z + 12];
                      float r6 = 1.0 / dt;
                      float r5 = 1.0 / (dt * dt);
                      float r4 = 1.0 / (vp[x - time + 12][y - time + 12][z + 12] * vp[x - time + 12][y - time + 12][z + 12]);
                      usol[t0][x - time + 12][y - time + 12][z + 12] = (r4 * (-r5 * (-2.0F * usol[t1][x - time + 12][y - time + 12][z + 12] + usol[t2][x - time + 12][y - time + 12][z + 12])) + r6 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 12][y - time + 12][z + 12]) + (r7 - 6.01250601e-5F * (usol[t1][x - time + 12][y - time + 12][z + 6] + usol[t1][x - time + 12][y - time + 12][z + 18]) + 1.03896104e-3F * (usol[t1][x - time + 12][y - time + 12][z + 7] + usol[t1][x - time + 12][y - time + 12][z + 17]) - 8.92857143e-3F * (usol[t1][x - time + 12][y - time + 12][z + 8] + usol[t1][x - time + 12][y - time + 12][z + 16]) + 5.29100529e-2F * (usol[t1][x - time + 12][y - time + 12][z + 9] + usol[t1][x - time + 12][y - time + 12][z + 15]) - 2.67857143e-1F * (usol[t1][x - time + 12][y - time + 12][z + 10] + usol[t1][x - time + 12][y - time + 12][z + 14]) + 1.71428571F * (usol[t1][x - time + 12][y - time + 12][z + 11] + usol[t1][x - time + 12][y - time + 12][z + 13])) / ((h_z * h_z)) + (r7 - 6.01250601e-5F * (usol[t1][x - time + 12][y - time + 6][z + 12] + usol[t1][x - time + 12][y - time + 18][z + 12]) + 1.03896104e-3F * (usol[t1][x - time + 12][y - time + 7][z + 12] + usol[t1][x - time + 12][y - time + 17][z + 12]) - 8.92857143e-3F * (usol[t1][x - time + 12][y - time + 8][z + 12] + usol[t1][x - time + 12][y - time + 16][z + 12]) + 5.29100529e-2F * (usol[t1][x - time + 12][y - time + 9][z + 12] + usol[t1][x - time + 12][y - time + 15][z + 12]) - 2.67857143e-1F * (usol[t1][x - time + 12][y - time + 10][z + 12] + usol[t1][x - time + 12][y - time + 14][z + 12]) + 1.71428571F * (usol[t1][x - time + 12][y - time + 11][z + 12] + usol[t1][x - time + 12][y - time + 13][z + 12])) / ((h_y * h_y)) + (r7 - 6.01250601e-5F * (usol[t1][x - time + 6][y - time + 12][z + 12] + usol[t1][x - time + 18][y - time + 12][z + 12]) + 1.03896104e-3F * (usol[t1][x - time + 7][y - time + 12][z + 12] + usol[t1][x - time + 17][y - time + 12][z + 12]) - 8.92857143e-3F * (usol[t1][x - time + 8][y - time + 
12][z + 12] + usol[t1][x - time + 16][y - time + 12][z + 12]) + 5.29100529e-2F * (usol[t1][x - time + 9][y - time + 12][z + 12] + usol[t1][x - time + 15][y - time + 12][z + 12]) - 2.67857143e-1F * (usol[t1][x - time + 10][y - time + 12][z + 12] + usol[t1][x - time + 14][y - time + 12][z + 12]) + 1.71428571F * (usol[t1][x - time + 11][y - time + 12][z + 12] + usol[t1][x - time + 13][y - time + 12][z + 12])) / ((h_x * h_x))) / (r4 * r5 + r6 * damp[x - time + 1][y - time + 1][z + 1]);
                    }
                    // Sparse source injection: only the nonzero mask entries
                    // for this (x, y) column are visited.
#pragma omp simd aligned(damp, usol, vp : 32)
                    for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
                    {
                      int zind = sp_source_mask[x - time][y - time][sp_zi];
                      float r0 = save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      usol[t0][x - time + 12][y - time + 12][zind + 12] += r0;}
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
  return 0;
}
|
GB.h | //------------------------------------------------------------------------------
// GB.h: definitions visible only inside GraphBLAS
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// These definitions are not visible to the user. They are used only inside
// GraphBLAS itself.
// Future plans: (see also 'grep -r FUTURE')
// FUTURE: support for dense matrices (A->i and A->p as NULL pointers)
// FUTURE: implement v1.3 of the API
// FUTURE: add matrix I/O in binary format (see draft LAGraph_binread/binwrite)
// FUTURE: add Heap method to GB_AxB_saxpy3 (inspector-executor style)
// FUTURE: allow matrices and vectors to be left jumbled (sort left pending)
#ifndef GB_H
#define GB_H
//------------------------------------------------------------------------------
// code development settings
//------------------------------------------------------------------------------
// to turn on Debug for a single file of GraphBLAS, add:
// #define GB_DEBUG
// just before the statement:
// #include "GB.h"
// set GB_BURBLE to 1 to enable extensive diagnostic output, or compile with
// -DGB_BURBLE=1. This setting can also be added at the top of any individual
// Source/* files, before #including any other files.
#ifndef GB_BURBLE
#define GB_BURBLE 0
#endif
// to turn on Debug for all of GraphBLAS, uncomment this line:
// #define GB_DEBUG
// to reduce code size and for faster time to compile, uncomment this line;
// GraphBLAS will be slower. Alternatively, use cmake with -DGBCOMPACT=1
// #define GBCOMPACT 1
// for code development only
// #define GB_DEVELOPER 1
// set these via cmake, or uncomment to select the user-thread model:
// #define USER_POSIX_THREADS
// #define USER_OPENMP_THREADS
// #define USER_NO_THREADS
//------------------------------------------------------------------------------
// manage compiler warnings
//------------------------------------------------------------------------------
#if defined __INTEL_COMPILER
// 10397: remark about where *.optrpt reports are placed
// 15552: loop not vectorized
#pragma warning (disable: 10397 15552 )
// disable icc -w2 warnings
// 191: type qualifier meaningless
// 193: zero used for undefined #define
// 589: bypass initialization
#pragma warning (disable: 191 193 )
// disable icc -w3 warnings
// 144: initialize with incompatible pointer
// 181: format
// 869: unused parameters
// 1572: floating point comparisons
// 1599: shadow
// 2259: typecasting may lose bits
// 2282: unrecognized pragma
// 2557: sign compare
#pragma warning (disable: 144 181 869 1572 1599 2259 2282 2557 )
// See GB_unused.h, for warnings 177 and 593, which are not globally
// disabled, but selectively by #include'ing GB_unused.h as needed.
// resolved (warnings no longer disabled globally):
// 58: sign compare
// 167: incompatible pointer
// 177: declared but unused
// 186: useless comparison
// 188: mixing enum types
// 593: set but not used
// 981: unspecified order
// 1418: no external declaration
// 1419: external declaration in source file
// 2330: const incompatible
// 2547: remark about include files
// 3280: shadow
#elif defined __GNUC__
// disable warnings for gcc 5.x and higher:
#if (__GNUC__ > 4)
// disable warnings
// #pragma GCC diagnostic ignored "-Wunknown-warning-option"
#pragma GCC diagnostic ignored "-Wint-in-bool-context"
#pragma GCC diagnostic ignored "-Wformat-truncation="
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
// enable these warnings as errors
#pragma GCC diagnostic error "-Wmisleading-indentation"
#endif
// disable warnings from -Wall -Wextra -Wpedantic
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wsign-compare"
#if defined ( __cplusplus )
#pragma GCC diagnostic ignored "-Wwrite-strings"
#else
#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
#endif
// See GB_unused.h, where these two pragmas are used:
// #pragma GCC diagnostic ignored "-Wunused-but-set-variable"
// #pragma GCC diagnostic ignored "-Wunused-variable"
// resolved (warnings no longer disabled globally):
// #pragma GCC diagnostic ignored "-Wunknown-pragmas"
// #pragma GCC diagnostic ignored "-Wtype-limits"
// #pragma GCC diagnostic ignored "-Wunused-result"
// #pragma GCC diagnostic ignored "-Wdiscarded-qualifiers"
// enable these warnings as errors
#pragma GCC diagnostic error "-Wswitch-default"
#if !defined ( __cplusplus )
#pragma GCC diagnostic error "-Wmissing-prototypes"
#endif
// #pragma GCC diagnostic error "-Wdouble-promotion"
#endif
#if ( _MSC_VER && !__INTEL_COMPILER )
// disable MS Visual Studio warnings
#pragma warning(disable:4146)
#endif
//------------------------------------------------------------------------------
// include GraphBLAS.h (depends on user threading model)
//------------------------------------------------------------------------------
#ifndef MATLAB_MEX_FILE
#define GB_LIBRARY
#endif
#include "GraphBLAS.h"
//------------------------------------------------------------------------------
// compiler variations
//------------------------------------------------------------------------------
// Determine the restrict keyword, and whether or not variable-length arrays
// are supported.
#if ( _MSC_VER && !__INTEL_COMPILER )
// Microsoft Visual Studio does not have the restrict keyword, but it does
// support __restrict, which is equivalent. Variable-length arrays are
// not supported. OpenMP tasks are not available.
#define GB_MICROSOFT 1
#define GB_RESTRICT __restrict
#define GB_HAS_VLA 0
#define GB_HAS_OPENMP_TASKS 0
#elif GxB_STDC_VERSION >= 199901L
// ANSI C99 and later have the restrict keyword and variable-length arrays.
#define GB_MICROSOFT 0
#define GB_RESTRICT restrict
#define GB_HAS_VLA 1
#define GB_HAS_OPENMP_TASKS 1
#else
// ANSI C95 and earlier have neither
#define GB_MICROSOFT 0
#define GB_RESTRICT
#define GB_HAS_VLA 0
#define GB_HAS_OPENMP_TASKS 1
#endif
//------------------------------------------------------------------------------
// Microsoft specific include files
//------------------------------------------------------------------------------
#if GB_MICROSOFT
#include <malloc.h>
#endif
//------------------------------------------------------------------------------
// OpenMP pragmas and tasks
//------------------------------------------------------------------------------
// GB_PRAGMA(x) becomes "#pragma x", but the way to do this depends on the
// compiler:
#if GB_MICROSOFT
// MS Visual Studio is not ANSI C11 compliant, and uses __pragma:
#define GB_PRAGMA(x) __pragma (x)
#else
// ANSI C11 compilers use _Pragma:
#define GB_PRAGMA(x) _Pragma (#x)
#endif
// construct pragmas for loop vectorization:
#if GB_MICROSOFT
// no #pragma omp simd is available in MS Visual Studio
#define GB_PRAGMA_SIMD
#define GB_PRAGMA_SIMD_REDUCTION(op,s)
#else
// create two kinds of SIMD pragmas:
// GB_PRAGMA_SIMD becomes "#pragma omp simd"
// GB_PRAGMA_SIMD_REDUCTION (+,cij) becomes
// "#pragma omp simd reduction(+:cij)"
#define GB_PRAGMA_SIMD GB_PRAGMA (omp simd)
#define GB_PRAGMA_SIMD_REDUCTION(op,s) GB_PRAGMA (omp simd reduction(op:s))
#endif
// construct pragmas for OpenMP tasks, if available:
#if GB_HAS_OPENMP_TASKS
// Use OpenMP tasks
#define GB_TASK(func, ...) \
GB_PRAGMA(omp task firstprivate(__VA_ARGS__)) \
func (__VA_ARGS__)
#define GB_TASK_WAIT GB_PRAGMA (omp taskwait)
#define GB_TASK_MASTER(nthreads) \
GB_PRAGMA (omp parallel num_threads (nthreads)) \
GB_PRAGMA (omp master)
#else
// OpenMP tasks not available
#define GB_TASK(func, ...) func (__VA_ARGS__)
#define GB_TASK_WAIT
#define GB_TASK_MASTER(nthreads)
#endif
#define GB_PRAGMA_IVDEP GB_PRAGMA(ivdep)
//------------------------------------------------------------------------------
// PGI_COMPILER_BUG
//------------------------------------------------------------------------------
// If GraphBLAS is compiled with -DPGI_COMPILER_BUG, then a workaround is
// enabled for a bug in the PGI compiler. The compiler does not correctly
// handle automatic arrays of variable size.
#ifdef PGI_COMPILER_BUG
// override the ANSI C compiler to turn off variable-length arrays
#undef GB_HAS_VLA
#define GB_HAS_VLA 0
#endif
//------------------------------------------------------------------------------
// variable-length arrays
//------------------------------------------------------------------------------
// If variable-length arrays are not supported, user-defined types are limited
// in size to 128 bytes or less. Many of the type-generic routines allocate
// workspace for a single scalar of variable size, using a statement:
//
// GB_void aij [xsize] ;
//
// To support non-variable-length arrays in ANSI C95 or earlier, this is used:
//
// GB_void aij [GB_VLA(xsize)] ;
//
// GB_VLA(xsize) is either defined as xsize (for ANSI C99 or later), or a fixed
// size of 128, in which case user-defined types are limited to a max of 128
// bytes.
typedef unsigned char GB_void ;
#if ( GB_HAS_VLA )
// variable-length arrays are allowed
#define GB_VLA(s) s
#else
// variable-length arrays are not allowed
#define GB_VLA_MAXSIZE 128
#define GB_VLA(s) GB_VLA_MAXSIZE
#endif
//------------------------------------------------------------------------------
// for coverage tests in Tcov/
//------------------------------------------------------------------------------
#ifdef GBCOVER
#define GBCOVER_MAX 20000
GB_PUBLIC int64_t GB_cov [GBCOVER_MAX] ;
GB_PUBLIC int GB_cover_max ;
#endif
//------------------------------------------------------------------------------
// GraphBLAS include files
//------------------------------------------------------------------------------
#include "GB_cplusplus.h"
#include "GB_Global.h"
#include "GB_printf.h"
#include "GB_assert.h"
#include "GB_opaque.h"
#include "GB_casting.h"
#include "GB_math.h"
#include "GB_bitwise.h"
#include "GB_wait.h"
#include "GB_binary_search.h"
//------------------------------------------------------------------------------
// default options
//------------------------------------------------------------------------------
// These parameters define the content of values that can be
// used as inputs to GxB_*Option_set.
// The default format is by row (CSR), with a hyper_ratio of 1/16.
// In Versions 2.1 and earlier, the default was GxB_BY_COL (CSC format).
#define GB_HYPER_DEFAULT (0.0625)
// compile SuiteSparse:GraphBLAS with "-DBYCOL" to make GxB_BY_COL the default
// format
#ifdef BYCOL
#define GB_FORMAT_DEFAULT GxB_BY_COL
#else
#define GB_FORMAT_DEFAULT GxB_BY_ROW
#endif
// these parameters define the hyper_ratio needed to ensure matrix stays
// either always hypersparse, or never hypersparse.
#define GB_ALWAYS_HYPER (1.0)
#define GB_NEVER_HYPER (-1.0)
#define GB_FORCE_HYPER 1
#define GB_FORCE_NONHYPER 0
#define GB_AUTO_HYPER (-1)
#define GB_SAME_HYPER_AS(A_is_hyper) \
((A_is_hyper) ? GB_FORCE_HYPER : GB_FORCE_NONHYPER)
// if A is hypersparse but all vectors are present, then
// treat A as if it were non-hypersparse
#define GB_IS_HYPER(A) \
(((A) != NULL) && ((A)->is_hyper && ((A)->nvec < (A)->vdim)))
//------------------------------------------------------------------------------
// macros for matrices and vectors
//------------------------------------------------------------------------------
// If A->nzmax is zero, then A->p might not be allocated. Note that this
// function does not count pending tuples; use GB_MATRIX_WAIT(A) first, if
// needed. For sparse or hypersparse matrix, Ap [0] == 0. For a slice or
// hyperslice, Ap [0] >= 0 points to the first entry in the slice. For all 4
// cases (sparse, hypersparse, slice, hyperslice), nnz(A) = Ap [nvec] - Ap [0].
// nnz(A) = Ap [nvec] - Ap [0] (see the comments above); 0 if A->p may be
// unallocated (nzmax == 0).  Does not count zombies or pending tuples.
#define GB_NNZ(A) (((A)->nzmax > 0) ? ((A)->p [(A)->nvec] - (A)->p [0]) : 0 )
// Upper bound on nnz(A) when the matrix has zombies and pending tuples;
// does not need GB_MATRIX_WAIT(A) first.
#define GB_NNZ_UPPER_BOUND(A) ((GB_NNZ (A) - A->nzombies) + GB_Pending_n (A))
int64_t GB_Pending_n // return # of pending tuples in A
(
GrB_Matrix A
) ;
// A is nrows-by-ncols, in either CSR or CSC format: is_csc selects whether
// the vector length (vlen) corresponds to rows or columns
#define GB_NROWS(A) ((A)->is_csc ? (A)->vlen : (A)->vdim)
#define GB_NCOLS(A) ((A)->is_csc ? (A)->vdim : (A)->vlen)
// The internal content of a GrB_Matrix and GrB_Vector are identical, and
// inside SuiteSparse:GraphBLAS, they can be typecasted between each other.
// This typecasting feature should not be done in user code, however, since it
// is not supported in the API. All GrB_Vector objects can be safely
// typecasted into a GrB_Matrix, but not the other way around. The GrB_Vector
// object is more restrictive. The GB_VECTOR_OK(v) macro defines the content
// that all GrB_Vector objects must have.
// GB_VECTOR_OK(v) is used mainly for assertions, but also to determine when it
// is safe to typecast an n-by-1 GrB_Matrix (in standard CSC format) into a
// GrB_Vector. This is not done in the main SuiteSparse:GraphBLAS library, but
// in the GraphBLAS/Test directory only. The macro is also used in
// GB_Vector_check, to ensure the content of a GrB_Vector is valid.
// the invariants of a GrB_Vector: a non-hypersparse CSC matrix holding
// exactly one vector (vdim == 1), with no hyperlist (h == NULL)
#define GB_VECTOR_OK(v) \
( \
((v) != NULL) && \
((v)->is_hyper == false) && \
((v)->is_csc == true) && \
((v)->plen == 1) && \
((v)->vdim == 1) && \
((v)->nvec == 1) && \
((v)->h == NULL) \
)
// A GxB_Scalar is a GrB_Vector of length 1
#define GB_SCALAR_OK(v) (GB_VECTOR_OK(v) && ((v)->vlen == 1))
//------------------------------------------------------------------------------
// aliased objects
//------------------------------------------------------------------------------
// GraphBLAS allows all inputs to all user-accessible objects to be aliased, as
// in GrB_mxm (C, C, accum, C, C, ...), which is valid. Internal routines are
// more restrictive.
// GB_aliased also checks the content of A and B
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
bool GB_aliased // determine if A and B are aliased
(
GrB_Matrix A, // input A matrix
GrB_Matrix B // input B matrix
) ;
//------------------------------------------------------------------------------
// internal GraphBLAS type and operator codes
//------------------------------------------------------------------------------
// GB_MAGIC is an arbitrary number that is placed inside each object when it is
// initialized, as a way of detecting uninitialized objects.
#define GB_MAGIC 0x72657473786f62ULL
// The magic number is set to GB_FREED when the object is freed, as a way of
// helping to detect dangling pointers.
#define GB_FREED 0x6c6c756e786f62ULL
// The value is set to GB_MAGIC2 when the object has been allocated but cannot
// yet be used in most methods and operations. Currently this is used only for
// when A->p array is allocated but not initialized.
#define GB_MAGIC2 0x7265745f786f62ULL
// predefined type objects
GB_PUBLIC struct GB_Type_opaque
GB_opaque_GrB_BOOL , // GrB_BOOL is a pointer to this object, etc.
GB_opaque_GrB_INT8 ,
GB_opaque_GrB_UINT8 ,
GB_opaque_GrB_INT16 ,
GB_opaque_GrB_UINT16 ,
GB_opaque_GrB_INT32 ,
GB_opaque_GrB_UINT32 ,
GB_opaque_GrB_INT64 ,
GB_opaque_GrB_UINT64 ,
GB_opaque_GrB_FP32 ,
GB_opaque_GrB_FP64 ,
GB_opaque_GxB_FC32 ,
GB_opaque_GxB_FC64 ;
//------------------------------------------------------------------------------
// monoid structs
//------------------------------------------------------------------------------
GB_PUBLIC struct GB_Monoid_opaque
// MIN monoids:
GB_opaque_GxB_MIN_INT8_MONOID, // identity: INT8_MAX
GB_opaque_GxB_MIN_UINT8_MONOID, // identity: UINT8_MAX
GB_opaque_GxB_MIN_INT16_MONOID, // identity: INT16_MAX
GB_opaque_GxB_MIN_UINT16_MONOID, // identity: UINT16_MAX
GB_opaque_GxB_MIN_INT32_MONOID, // identity: INT32_MAX
GB_opaque_GxB_MIN_UINT32_MONOID, // identity: UINT32_MAX
GB_opaque_GxB_MIN_INT64_MONOID, // identity: INT64_MAX
GB_opaque_GxB_MIN_UINT64_MONOID, // identity: UINT64_MAX
GB_opaque_GxB_MIN_FP32_MONOID, // identity: INFINITY
GB_opaque_GxB_MIN_FP64_MONOID, // identity: INFINITY
// MAX monoids:
GB_opaque_GxB_MAX_INT8_MONOID, // identity: INT8_MIN
GB_opaque_GxB_MAX_UINT8_MONOID, // identity: 0
GB_opaque_GxB_MAX_INT16_MONOID, // identity: INT16_MIN
GB_opaque_GxB_MAX_UINT16_MONOID, // identity: 0
GB_opaque_GxB_MAX_INT32_MONOID, // identity: INT32_MIN
GB_opaque_GxB_MAX_UINT32_MONOID, // identity: 0
GB_opaque_GxB_MAX_INT64_MONOID, // identity: INT64_MIN
GB_opaque_GxB_MAX_UINT64_MONOID, // identity: 0
GB_opaque_GxB_MAX_FP32_MONOID, // identity: -INFINITY
GB_opaque_GxB_MAX_FP64_MONOID, // identity: -INFINITY
// PLUS monoids:
GB_opaque_GxB_PLUS_INT8_MONOID, // identity: 0
GB_opaque_GxB_PLUS_UINT8_MONOID, // identity: 0
GB_opaque_GxB_PLUS_INT16_MONOID, // identity: 0
GB_opaque_GxB_PLUS_UINT16_MONOID, // identity: 0
GB_opaque_GxB_PLUS_INT32_MONOID, // identity: 0
GB_opaque_GxB_PLUS_UINT32_MONOID, // identity: 0
GB_opaque_GxB_PLUS_INT64_MONOID, // identity: 0
GB_opaque_GxB_PLUS_UINT64_MONOID, // identity: 0
GB_opaque_GxB_PLUS_FP32_MONOID, // identity: 0
GB_opaque_GxB_PLUS_FP64_MONOID, // identity: 0
GB_opaque_GxB_PLUS_FC32_MONOID, // identity: 0
GB_opaque_GxB_PLUS_FC64_MONOID, // identity: 0
// TIMES monoids:
GB_opaque_GxB_TIMES_INT8_MONOID, // identity: 1
GB_opaque_GxB_TIMES_UINT8_MONOID, // identity: 1
GB_opaque_GxB_TIMES_INT16_MONOID, // identity: 1
GB_opaque_GxB_TIMES_UINT16_MONOID, // identity: 1
GB_opaque_GxB_TIMES_INT32_MONOID, // identity: 1
GB_opaque_GxB_TIMES_UINT32_MONOID, // identity: 1
GB_opaque_GxB_TIMES_INT64_MONOID, // identity: 1
GB_opaque_GxB_TIMES_UINT64_MONOID, // identity: 1
GB_opaque_GxB_TIMES_FP32_MONOID, // identity: 1
GB_opaque_GxB_TIMES_FP64_MONOID, // identity: 1
GB_opaque_GxB_TIMES_FC32_MONOID, // identity: 1
GB_opaque_GxB_TIMES_FC64_MONOID, // identity: 1
// ANY monoids:
GB_opaque_GxB_ANY_INT8_MONOID,
GB_opaque_GxB_ANY_UINT8_MONOID,
GB_opaque_GxB_ANY_INT16_MONOID,
GB_opaque_GxB_ANY_UINT16_MONOID,
GB_opaque_GxB_ANY_INT32_MONOID,
GB_opaque_GxB_ANY_UINT32_MONOID,
GB_opaque_GxB_ANY_INT64_MONOID,
GB_opaque_GxB_ANY_UINT64_MONOID,
GB_opaque_GxB_ANY_FP32_MONOID,
GB_opaque_GxB_ANY_FP64_MONOID,
GB_opaque_GxB_ANY_FC32_MONOID,
GB_opaque_GxB_ANY_FC64_MONOID,
// Boolean monoids:
GB_opaque_GxB_ANY_BOOL_MONOID,
GB_opaque_GxB_LOR_BOOL_MONOID, // identity: false
GB_opaque_GxB_LAND_BOOL_MONOID, // identity: true
GB_opaque_GxB_LXOR_BOOL_MONOID, // identity: false
GB_opaque_GxB_EQ_BOOL_MONOID, // identity: true
// BOR monoids: (bitwise OR)
GB_opaque_GxB_BOR_UINT8_MONOID,
GB_opaque_GxB_BOR_UINT16_MONOID,
GB_opaque_GxB_BOR_UINT32_MONOID,
GB_opaque_GxB_BOR_UINT64_MONOID,
// BAND monoids: (bitwise and)
GB_opaque_GxB_BAND_UINT8_MONOID,
GB_opaque_GxB_BAND_UINT16_MONOID,
GB_opaque_GxB_BAND_UINT32_MONOID,
GB_opaque_GxB_BAND_UINT64_MONOID,
// BXOR monoids: (bitwise xor)
GB_opaque_GxB_BXOR_UINT8_MONOID,
GB_opaque_GxB_BXOR_UINT16_MONOID,
GB_opaque_GxB_BXOR_UINT32_MONOID,
GB_opaque_GxB_BXOR_UINT64_MONOID,
// BXNOR monoids: (bitwise xnor)
GB_opaque_GxB_BXNOR_UINT8_MONOID,
GB_opaque_GxB_BXNOR_UINT16_MONOID,
GB_opaque_GxB_BXNOR_UINT32_MONOID,
GB_opaque_GxB_BXNOR_UINT64_MONOID ;
//------------------------------------------------------------------------------
// select structs
//------------------------------------------------------------------------------
GB_PUBLIC struct GB_SelectOp_opaque
GB_opaque_GxB_TRIL,
GB_opaque_GxB_TRIU,
GB_opaque_GxB_DIAG,
GB_opaque_GxB_OFFDIAG,
GB_opaque_GxB_NONZERO,
GB_opaque_GxB_EQ_ZERO,
GB_opaque_GxB_GT_ZERO,
GB_opaque_GxB_GE_ZERO,
GB_opaque_GxB_LT_ZERO,
GB_opaque_GxB_LE_ZERO,
GB_opaque_GxB_NE_THUNK,
GB_opaque_GxB_EQ_THUNK,
GB_opaque_GxB_GT_THUNK,
GB_opaque_GxB_GE_THUNK,
GB_opaque_GxB_LT_THUNK,
GB_opaque_GxB_LE_THUNK ;
//------------------------------------------------------------------------------
// error logging and parallel thread control
//------------------------------------------------------------------------------
// Error messages are logged in GB_DLEN, on the stack, and then copied into
// thread-local storage of size GB_RLEN. If the user-defined data types,
// operators, etc have really long names, the error messages are safely
// truncated (via snprintf). This is intentional, but gcc with
// -Wformat-truncation will print a warning (see pragmas above). Ignore the
// warning.
// The Context also contains the number of threads to use in the operation. It
// is normally determined from the user's descriptor, with a default of
// nthreads_max = GxB_DEFAULT (that is, zero). The default rule is to let
// GraphBLAS determine the number of threads automatically by selecting a
// number of threads between 1 and nthreads_max. GrB_init initializes
// nthreads_max to omp_get_max_threads. Both the global value and the value in
// a descriptor can be set/queried by GxB_set / GxB_get.
// Some GrB_Matrix and GrB_Vector methods do not take a descriptor, however
// (GrB_*_dup, _build, _exportTuples, _clear, _nvals, _wait, and GxB_*_resize).
// For those methods the default rule is always used (nthreads_max =
// GxB_DEFAULT), which then relies on the global nthreads_max.
// error-message buffer sizes: GB_DLEN for the on-stack Context->details,
// GB_RLEN for the copy kept in thread-local storage (see comments above)
#define GB_RLEN 384
#define GB_DLEN 256
// the Context: carries per-call settings and the error report; constructed
// on the stack by GB_CONTEXT / GB_WHERE at each user-callable entry point
typedef struct
{
double chunk ; // chunk size for small problems
int nthreads_max ; // max # of threads to use
const char *where ; // GraphBLAS function where error occurred
char details [GB_DLEN] ; // error report
bool use_mkl ; // control usage of Intel MKL
}
GB_Context_struct ;
typedef GB_Context_struct *GB_Context ;
// GB_WHERE keeps track of the currently running user-callable function.
// User-callable functions in this implementation are written so that they do
// not call other unrelated user-callable functions (except for GrB_*free).
// Related user-callable functions can call each other since they all report
// the same type-generic name. Internal functions can be called by many
// different user-callable functions, directly or indirectly. It would not be
// helpful to report the name of an internal function that flagged an error
// condition. Thus, each time a user-callable function is entered (except
// GrB_*free), it logs the name of the function with the GB_WHERE macro.
// GrB_*free does not encounter error conditions so it doesn't need to be
// logged by the GB_WHERE macro.
// GB_PANIC: the statement executed when GrB_init has not been called, or a
// critical section fails; overridable by defining GB_PANIC beforehand
#ifndef GB_PANIC
#define GB_PANIC return (GrB_PANIC)
#endif
// GB_CONTEXT: declare an on-stack Context for the current call, recording
// the function name and the global nthreads/chunk/MKL settings
#define GB_CONTEXT(where_string) \
/* construct the Context */ \
GB_Context_struct Context_struct ; \
GB_Context Context = &Context_struct ; \
/* set Context->where so GrB_error can report it if needed */ \
Context->where = where_string ; \
/* get the default max # of threads and default chunk size */ \
Context->nthreads_max = GB_Global_nthreads_max_get ( ) ; \
Context->chunk = GB_Global_chunk_get ( ) ; \
Context->use_mkl = GB_Global_use_mkl_get ( )
// GB_WHERE: entry guard for user-callable functions; panics if GrB_init (or
// GxB_init) has not yet been called, then constructs the Context
#define GB_WHERE(where_string) \
if (!GB_Global_GrB_init_called_get ( )) \
{ \
/* GrB_init (or GxB_init) has not been called! */ \
GB_PANIC ; \
} \
GB_CONTEXT (where_string)
//------------------------------------------------------------------------------
// GB_GET_NTHREADS_MAX: determine max # of threads for OpenMP parallelism.
//------------------------------------------------------------------------------
// GB_GET_NTHREADS_MAX obtains the max # of threads to use and the chunk
// size from the Context. If Context is NULL then a single thread *must*
// be used. If Context->nthreads_max is <= GxB_DEFAULT, then select
// automatically: between 1 and nthreads_max, depending on the problem
// size. Below is the default rule. Any function can use its own rule
// instead, based on Context, chunk, nthreads_max, and the problem size.
// No rule can exceed nthreads_max.
// declares and initializes nthreads_max and chunk (see the comments above);
// a NULL Context yields nthreads_max = 1, and any value <= GxB_DEFAULT falls
// back to the corresponding global setting
#define GB_GET_NTHREADS_MAX(nthreads_max,chunk,Context) \
int nthreads_max = (Context == NULL) ? 1 : Context->nthreads_max ; \
if (nthreads_max <= GxB_DEFAULT) \
{ \
nthreads_max = GB_Global_nthreads_max_get ( ) ; \
} \
double chunk = (Context == NULL) ? GxB_DEFAULT : Context->chunk ; \
if (chunk <= GxB_DEFAULT) \
{ \
chunk = GB_Global_chunk_get ( ) ; \
}
//------------------------------------------------------------------------------
// GB_nthreads: determine # of threads to use for a parallel loop or region
//------------------------------------------------------------------------------
// If work < 2*chunk, then only one thread is used.
// else if work < 3*chunk, then two threads are used, and so on.
static inline int GB_nthreads // return # of threads to use
(
double work, // total work to do
double chunk, // give each thread at least this much work
int nthreads_max // max # of threads to use
)
{
work = GB_IMAX (work, 1) ;
chunk = GB_IMAX (chunk, 1) ;
int64_t nthreads = (int64_t) floor (work / chunk) ;
nthreads = GB_IMIN (nthreads, nthreads_max) ;
nthreads = GB_IMAX (nthreads, 1) ;
return ((int) nthreads) ;
}
//------------------------------------------------------------------------------
// error logging
//------------------------------------------------------------------------------
// The GB_ERROR and GB_LOG macros work together. If an error occurs, the
// GB_ERROR macro records the details in the Context.details, and returns the
// GrB_info to its 'caller'. This value can then be returned, or set to an
// info variable of type GrB_Info. For example:
//
// if (i >= nrows)
// {
// return (GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, (GB_LOG,
// "Row index %d out of bounds; must be < %d", i, nrows))) ;
// }
//
// The user can then do:
//
// printf ("%s", GrB_error ( )) ;
//
// To print details of the error, which includes: which user-callable function
// encountered the error, the error status (GrB_INDEX_OUT_OF_BOUNDS), the
// details ("Row index 102 out of bounds, must be < 100").
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
const char *GB_status_code (GrB_Info info) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_error // log an error in thread-local-storage
(
GrB_Info info, // error return code from a GraphBLAS function
GB_Context Context // pointer to a Context struct, on the stack
) ;
// GB_LOG becomes the snprintf_args for GB_ERROR. Unused if Context is NULL.
// GB_LOG becomes the snprintf_args for GB_ERROR. Unused if Context is NULL.
#define GB_LOG Context->details, GB_DLEN
// if Context is NULL, do not log the error string in Context->details.
// The comma operator discards the snprintf result so the whole expression
// evaluates to GB_error's GrB_Info, letting GB_ERROR sit in a return.
#define GB_ERROR(info,snprintf_args) \
( \
((Context == NULL) ? 0 : snprintf snprintf_args), \
GB_error (info, Context) \
)
// return (GB_OUT_OF_MEMORY) ; reports an out-of-memory error
#define GB_OUT_OF_MEMORY GB_ERROR (GrB_OUT_OF_MEMORY, (GB_LOG, "out of memory"))
//------------------------------------------------------------------------------
// GraphBLAS check functions: check and optionally print an object
//------------------------------------------------------------------------------
// pr values for *_check functions
#define GB0 GxB_SILENT
#define GB1 GxB_SUMMARY
#define GB2 GxB_SHORT
#define GB3 GxB_COMPLETE
#define GB4 GxB_SHORT_VERBOSE
#define GB5 GxB_COMPLETE_VERBOSE
// a NULL name is treated as the empty string
#define GB_NAME ((name != NULL) ? name : "")
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_entry_check // print a single value
(
const GrB_Type type, // type of value to print
const void *x, // value to print
int pr, // print level
FILE *f, // file to print to
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_code_check // print and check an entry using a type code
(
const GB_Type_code code, // type code of value to print
const void *x, // entry to print
int pr, // print level
FILE *f, // file to print to
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Type_check // check a GraphBLAS Type
(
const GrB_Type type, // GraphBLAS type to print and check
const char *name, // name of the type from the caller; optional
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_BinaryOp_check // check a GraphBLAS binary operator
(
const GrB_BinaryOp op, // GraphBLAS operator to print and check
const char *name, // name of the operator
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_UnaryOp_check // check a GraphBLAS unary operator
(
const GrB_UnaryOp op, // GraphBLAS operator to print and check
const char *name, // name of the operator
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_SelectOp_check // check a GraphBLAS select operator
(
const GxB_SelectOp op, // GraphBLAS operator to print and check
const char *name, // name of the operator
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Monoid_check // check a GraphBLAS monoid
(
const GrB_Monoid monoid, // GraphBLAS monoid to print and check
const char *name, // name of the monoid, optional
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Semiring_check // check a GraphBLAS semiring
(
const GrB_Semiring semiring, // GraphBLAS semiring to print and check
const char *name, // name of the semiring, optional
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Descriptor_check // check a GraphBLAS descriptor
(
const GrB_Descriptor D, // GraphBLAS descriptor to print and check
const char *name, // name of the descriptor, optional
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_matvec_check // check a GraphBLAS matrix or vector
(
const GrB_Matrix A, // GraphBLAS matrix to print and check
const char *name, // name of the matrix, optional
int pr, // print level; if negative, ignore nzombie
// conditions and use GB_FLIP(pr) for diagnostics
FILE *f, // file for output
const char *kind, // "matrix" or "vector"
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Matrix_check // check a GraphBLAS matrix
(
const GrB_Matrix A, // GraphBLAS matrix to print and check
const char *name, // name of the matrix
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Vector_check // check a GraphBLAS vector
(
const GrB_Vector v, // GraphBLAS vector to print and check
const char *name, // name of the vector
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
GrB_Info GB_Scalar_check // check a GraphBLAS GxB_Scalar
(
const GxB_Scalar v, // GraphBLAS GxB_Scalar to print and check
const char *name, // name of the GxB_Scalar
int pr, // print level
FILE *f, // file for output
GB_Context Context
) ;
//------------------------------------------------------------------------------
// internal GraphBLAS functions
//------------------------------------------------------------------------------
GrB_Info GB_init // start up GraphBLAS
(
const GrB_Mode mode, // blocking or non-blocking mode
// pointers to memory management functions. Must be non-NULL.
void * (* malloc_function ) (size_t),
void * (* calloc_function ) (size_t, size_t),
void * (* realloc_function ) (void *, size_t),
void (* free_function ) (void *),
bool malloc_is_thread_safe,
bool caller_is_GxB_cuda_init, // true for GxB_cuda_init only
GB_Context Context // from GrB_init or GxB_init
) ;
// how GB_new / GB_create should allocate the vector pointers A->p and the
// hyperlist A->h
typedef enum // input parameter to GB_new and GB_create
{
GB_Ap_calloc, // 0: calloc A->p, malloc A->h if hypersparse
GB_Ap_malloc, // 1: malloc A->p, malloc A->h if hypersparse
GB_Ap_null // 2: do not allocate A->p or A->h
}
GB_Ap_code ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_new // create matrix, except for indices & values
(
GrB_Matrix *Ahandle, // handle of matrix to create
const GrB_Type type, // matrix type
const int64_t vlen, // length of each vector
const int64_t vdim, // number of vectors
const GB_Ap_code Ap_option, // allocate A->p and A->h, or leave NULL
const bool is_csc, // true if CSC, false if CSR
const int hyper_option, // 1:hyper, 0:nonhyper, -1:auto
const double hyper_ratio, // A->hyper_ratio, unless auto
const int64_t plen, // size of A->p and A->h, if A hypersparse.
// Ignored if A is not hypersparse.
GB_Context Context
) ;
GrB_Info GB_create // create a new matrix, including A->i and A->x
(
GrB_Matrix *Ahandle, // output matrix to create
const GrB_Type type, // type of output matrix
const int64_t vlen, // length of each vector
const int64_t vdim, // number of vectors
const GB_Ap_code Ap_option, // allocate A->p and A->h, or leave NULL
const bool is_csc, // true if CSC, false if CSR
const int hyper_option, // 1:hyper, 0:nonhyper, -1:auto
const double hyper_ratio, // A->hyper_ratio, unless auto
const int64_t plen, // size of A->p and A->h, if hypersparse
const int64_t anz, // number of nonzeros the matrix must hold
const bool numeric, // if true, allocate A->x, else A->x is NULL
GB_Context Context
) ;
GrB_Info GB_hyper_realloc
(
GrB_Matrix A, // matrix with hyperlist to reallocate
int64_t plen_new, // new size of A->p and A->h
GB_Context Context
) ;
GrB_Info GB_clear // clear a matrix, type and dimensions unchanged
(
GrB_Matrix A, // matrix to clear
GB_Context Context
) ;
GrB_Info GB_dup // make an exact copy of a matrix
(
GrB_Matrix *Chandle, // handle of output matrix to create
const GrB_Matrix A, // input matrix to copy
const bool numeric, // if true, duplicate the numeric values
const GrB_Type ctype, // type of C, if numeric is false
GB_Context Context
) ;
GrB_Info GB_dup2 // make an exact copy of a matrix
(
GrB_Matrix *Chandle, // handle of output matrix to create
const GrB_Matrix A, // input matrix to copy
const bool numeric, // if true, duplicate the numeric values
const GrB_Type ctype, // type of C, if numeric is false
GB_Context Context
) ;
void GB_memcpy // parallel memcpy
(
void *dest, // destination
const void *src, // source
size_t n, // # of bytes to copy
int nthreads // # of threads to use
) ;
GrB_Info GB_nvals // get the number of entries in a matrix
(
GrB_Index *nvals, // matrix has nvals entries
const GrB_Matrix A, // matrix to query
GB_Context Context
) ;
GrB_Info GB_matvec_type // get the type of a matrix
(
GrB_Type *type, // returns the type of the matrix
const GrB_Matrix A, // matrix to query
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_ix_alloc // allocate A->i and A->x space in a matrix
(
GrB_Matrix A, // matrix to allocate space for
const GrB_Index nzmax, // number of entries the matrix can hold
const bool numeric, // if true, allocate A->x, otherwise A->x is NULL
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_ix_realloc // reallocate space in a matrix
(
GrB_Matrix A, // matrix to allocate space for
const GrB_Index nzmax, // new number of entries the matrix can hold
const bool numeric, // if true, reallocate A->x, otherwise A->x is NULL
GB_Context Context
) ;
GrB_Info GB_ix_resize // resize a matrix
(
GrB_Matrix A,
const int64_t anz_new, // required new nnz(A)
GB_Context Context
) ;
// free A->i and A->x and return if critical section fails
#define GB_IX_FREE(A) \
if (GB_ix_free (A) == GrB_PANIC) GB_PANIC
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_ix_free // free A->i and A->x of a matrix
(
GrB_Matrix A // matrix with content to free
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_ph_free // free A->p and A->h of a matrix
(
GrB_Matrix A // matrix with content to free
) ;
// free all content, and return if critical section fails
#define GB_PHIX_FREE(A) \
if (GB_phix_free (A) == GrB_PANIC) GB_PANIC
GrB_Info GB_phix_free // free all content of a matrix
(
GrB_Matrix A // matrix with content to free
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
bool GB_Type_compatible // check if two types can be typecast
(
const GrB_Type atype,
const GrB_Type btype
) ;
//------------------------------------------------------------------------------
// GB_code_compatible: return true if domains are compatible
//------------------------------------------------------------------------------
// Two domains are compatible for typecasting between them if both are built-in
// types (of any kind) or if both are the same user-defined type. This
// function does not have the type itself, but just the code. If the types are
// available, GB_Type_compatible should be called instead.
static inline bool GB_code_compatible // true if two types can be typecast
(
    const GB_Type_code acode,   // type code of a
    const GB_Type_code bcode    // type code of b
)
{
    // All built-in domains can be typecast among themselves.  A
    // user-defined type is only compatible with another user-defined type;
    // since only the codes are available here (not the types themselves),
    // the two user-defined types are assumed to match.  If the actual types
    // are available, call GB_Type_compatible instead.
    const bool a_is_udt = (acode == GB_UDT_code) ;
    const bool b_is_udt = (bcode == GB_UDT_code) ;
    if (a_is_udt || b_is_udt)
    {
        // compatible only if both sides are user-defined
        return (a_is_udt && b_is_udt) ;
    }
    // both built-in: always compatible
    return (true) ;
}
//------------------------------------------------------------------------------
// GB_task_struct: parallel task descriptor
//------------------------------------------------------------------------------
// The element-wise computations (GB_add, GB_emult, and GB_mask) compute
// C(:,j)<M(:,j)> = op (A (:,j), B(:,j)). They are parallelized by slicing the
// work into tasks, described by the GB_task_struct.
// There are two kinds of tasks. For a coarse task, kfirst <= klast, and the
// task computes all vectors in C(:,kfirst:klast), inclusive. None of the
// vectors are sliced and computed by other tasks. For a fine task, klast is
// -1. The task computes part of the single vector C(:,kfirst). It starts at
// pA in Ai,Ax, at pB in Bi,Bx, and (if M is present) at pM in Mi,Mx. It
// computes C(:,kfirst), starting at pC in Ci,Cx.
// GB_subref also uses the TaskList. It has 12 kinds of fine tasks,
// corresponding to each of the 12 methods used in GB_subref_template. For
// those fine tasks, method = -TaskList [taskid].klast defines the method to
// use.
// The GB_subassign functions use the TaskList, in many different ways.
// see the comments above: a coarse task has kfirst <= klast and owns whole
// vectors C(:,kfirst:klast); a fine task has klast == -1 and computes part
// of the single vector C(:,kfirst), using the p* offsets below
typedef struct // task descriptor
{
int64_t kfirst ; // C(:,kfirst) is the first vector in this task.
int64_t klast ; // C(:,klast) is the last vector in this task.
int64_t pC ; // fine task starts at Ci, Cx [pC]
int64_t pC_end ; // fine task ends at Ci, Cx [pC_end-1]
int64_t pM ; // fine task starts at Mi, Mx [pM]
int64_t pM_end ; // fine task ends at Mi, Mx [pM_end-1]
int64_t pA ; // fine task starts at Ai, Ax [pA]
int64_t pA_end ; // fine task ends at Ai, Ax [pA_end-1]
int64_t pB ; // fine task starts at Bi, Bx [pB]
int64_t pB_end ; // fine task ends at Bi, Bx [pB_end-1]
int64_t len ; // fine task handles a subvector of this length
}
GB_task_struct ;
// GB_REALLOC_TASK_LIST: Allocate or reallocate the TaskList so that it can
// hold at least ntasks. Double the size if it's too small.
// On out-of-memory this frees all workspace via GB_FREE_ALL (which the
// caller must define) and returns GB_OUT_OF_MEMORY.  Newly allocated
// entries get every field set to INT64_MIN (kfirst to -1) so that use of an
// unset entry is recognizable.  Grows to 2*ntasks (+1 slot), so repeated
// calls double the capacity.
#define GB_REALLOC_TASK_LIST(TaskList,ntasks,max_ntasks) \
{ \
if ((ntasks) >= max_ntasks) \
{ \
bool ok ; \
int nold = (max_ntasks == 0) ? 0 : (max_ntasks + 1) ; \
int nnew = 2 * (ntasks) + 1 ; \
TaskList = GB_REALLOC (TaskList, nnew, nold, GB_task_struct, &ok) ; \
if (!ok) \
{ \
/* out of memory */ \
GB_FREE_ALL ; \
return (GB_OUT_OF_MEMORY) ; \
} \
for (int t = nold ; t < nnew ; t++) \
{ \
TaskList [t].kfirst = -1 ; \
TaskList [t].klast = INT64_MIN ; \
TaskList [t].pA = INT64_MIN ; \
TaskList [t].pA_end = INT64_MIN ; \
TaskList [t].pB = INT64_MIN ; \
TaskList [t].pB_end = INT64_MIN ; \
TaskList [t].pC = INT64_MIN ; \
TaskList [t].pC_end = INT64_MIN ; \
TaskList [t].pM = INT64_MIN ; \
TaskList [t].pM_end = INT64_MIN ; \
TaskList [t].len = INT64_MIN ; \
} \
max_ntasks = 2 * (ntasks) ; \
} \
ASSERT ((ntasks) < max_ntasks) ; \
}
GrB_Info GB_ewise_slice
(
// output:
GB_task_struct **p_TaskList, // array of structs, of size max_ntasks
int *p_max_ntasks, // size of TaskList
int *p_ntasks, // # of tasks constructed
int *p_nthreads, // # of threads to use
// input:
const int64_t Cnvec, // # of vectors of C
const int64_t *GB_RESTRICT Ch, // vectors of C, if hypersparse
const int64_t *GB_RESTRICT C_to_M, // mapping of C to M
const int64_t *GB_RESTRICT C_to_A, // mapping of C to A
const int64_t *GB_RESTRICT C_to_B, // mapping of C to B
bool Ch_is_Mh, // if true, then Ch == Mh; GB_add only
const GrB_Matrix M, // mask matrix to slice (optional)
const GrB_Matrix A, // matrix to slice
const GrB_Matrix B, // matrix to slice
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_slice_vector
(
// output: return i, pA, and pB
int64_t *p_i, // work starts at A(i,kA) and B(i,kB)
int64_t *p_pM, // M(i:end,kM) starts at pM
int64_t *p_pA, // A(i:end,kA) starts at pA
int64_t *p_pB, // B(i:end,kB) starts at pB
// input:
const int64_t pM_start, // M(:,kM) starts at pM_start in Mi,Mx
const int64_t pM_end, // M(:,kM) ends at pM_end-1 in Mi,Mx
const int64_t *GB_RESTRICT Mi, // indices of M (or NULL)
const int64_t pA_start, // A(:,kA) starts at pA_start in Ai,Ax
const int64_t pA_end, // A(:,kA) ends at pA_end-1 in Ai,Ax
const int64_t *GB_RESTRICT Ai, // indices of A
const int64_t A_hfirst, // if Ai is an implicit hyperlist
const int64_t pB_start, // B(:,kB) starts at pB_start in Bi,Bx
const int64_t pB_end, // B(:,kB) ends at pB_end-1 in Bi,Bx
const int64_t *GB_RESTRICT Bi, // indices of B
const int64_t vlen, // A->vlen and B->vlen
const double target_work // target work
) ;
void GB_task_cumsum
(
int64_t *Cp, // size Cnvec+1
const int64_t Cnvec,
int64_t *Cnvec_nonempty, // # of non-empty vectors in C
GB_task_struct *GB_RESTRICT TaskList, // array of structs
const int ntasks, // # of tasks
const int nthreads // # of threads
) ;
//------------------------------------------------------------------------------
// GB_GET_VECTOR: get the content of a vector for a coarse/fine task
//------------------------------------------------------------------------------
// declares pX_start/pX_fini with the range of X this task operates on:
// a fine task uses its private slice bounds from TaskList [taskid]; a
// coarse task covers the whole vector X(:,kX) via the vector pointers Xp.
// Relies on fine_task, TaskList, and taskid being in scope at the use site.
#define GB_GET_VECTOR(pX_start, pX_fini, pX, pX_end, Xp, kX) \
int64_t pX_start, pX_fini ; \
if (fine_task) \
{ \
/* A fine task operates on a slice of X(:,k) */ \
pX_start = TaskList [taskid].pX ; \
pX_fini = TaskList [taskid].pX_end ; \
} \
else \
{ \
/* vectors are never sliced for a coarse task */ \
pX_start = Xp [kX] ; \
pX_fini = Xp [kX+1] ; \
}
//------------------------------------------------------------------------------
GrB_Info GB_transplant // transplant one matrix into another
(
GrB_Matrix C, // output matrix to overwrite with A
const GrB_Type ctype, // new type of C
GrB_Matrix *Ahandle, // input matrix to copy from and free
GB_Context Context
) ;
GrB_Info GB_transplant_conform // transplant and conform hypersparsity
(
GrB_Matrix C, // destination matrix to transplant into
GrB_Type ctype, // type to cast into
GrB_Matrix *Thandle, // source matrix
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
size_t GB_code_size // return the size of a type, given its code
(
const GB_Type_code code, // input code of the type to find the size of
const size_t usize // known size of user-defined type
) ;
//------------------------------------------------------------------------------
// memory management
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void *GB_calloc_memory // pointer to allocated block of memory
(
size_t nitems, // number of items to allocate
size_t size_of_item // sizeof each item
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void *GB_malloc_memory // pointer to allocated block of memory
(
size_t nitems, // number of items to allocate
size_t size_of_item // sizeof each item
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void *GB_realloc_memory // pointer to reallocated block of memory, or
// to original block if the realloc failed.
(
size_t nitems_new, // new number of items in the object
size_t nitems_old, // old number of items in the object
size_t size_of_item, // sizeof each item
void *p, // old object to reallocate
bool *ok // true if successful, false otherwise
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_free_memory
(
void *p // pointer to allocated block of memory to free
) ;
// GB_FREE: free a block of memory obtained from GB_MALLOC/GB_CALLOC/
// GB_REALLOC and set the pointer to NULL so it cannot be freed twice.
// The macro argument is fully parenthesized inside the cast so that an
// argument expression is cast as a whole (standard macro hygiene); the
// assignment to NULL was already parenthesized.
#define GB_FREE(p) \
{ \
GB_free_memory ((void *) (p)) ; \
(p) = NULL ; \
}
// Typed allocation wrappers: supply sizeof(type) and cast the result.
#define GB_CALLOC(n,type) (type *) GB_calloc_memory (n, sizeof (type))
#define GB_MALLOC(n,type) (type *) GB_malloc_memory (n, sizeof (type))
// GB_REALLOC: p keeps its old value if the reallocation fails; the bool
// (*ok) reports success (see GB_realloc_memory above).
#define GB_REALLOC(p,nnew,nold,type,ok) \
p = (type *) GB_realloc_memory (nnew, nold, sizeof (type), (void *) p, ok)
//------------------------------------------------------------------------------
// macros to create/free matrices, vectors, and scalars
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Matrix_free // free a matrix
(
GrB_Matrix *matrix_handle // handle of matrix to free
) ;
// Free a matrix, halting (GB_PANIC) if GB_Matrix_free panics.  Vectors and
// scalars are freed by casting their handles to GrB_Matrix*; this relies on
// GrB_Vector/scalar objects sharing the matrix layout (as the casts below
// assume).
#define GB_MATRIX_FREE(A) \
{ \
if (GB_Matrix_free (A) == GrB_PANIC) GB_PANIC ; \
}
#define GB_VECTOR_FREE(v) GB_MATRIX_FREE ((GrB_Matrix *) v)
#define GB_SCALAR_FREE(s) GB_MATRIX_FREE ((GrB_Matrix *) s)
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Type GB_code_type // return the GrB_Type corresponding to the code
(
const GB_Type_code code, // type code to convert
const GrB_Type type // user type if code is GB_UDT_code
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_slice // slice B into nthreads slices or hyperslices
(
GrB_Matrix B, // matrix to slice
int nthreads, // # of slices to create
int64_t *Slice, // array of size nthreads+1 that defines the slice
GrB_Matrix *Bslice, // array of output slices, of size nthreads
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
bool GB_pslice // slice Ap; return true if ok, false if out of memory
(
int64_t *GB_RESTRICT *Slice_handle, // size ntasks+1
const int64_t *GB_RESTRICT Ap, // array of size n+1
const int64_t n,
const int ntasks // # of tasks
) ;
void GB_eslice
(
// output:
int64_t *Slice, // array of size ntasks+1
// input:
int64_t e, // number items to partition amongst the tasks
const int ntasks // # of tasks
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_cumsum // cumulative sum of an array
(
int64_t *GB_RESTRICT count, // size n+1, input/output
const int64_t n,
int64_t *GB_RESTRICT kresult, // return k, if needed by the caller
int nthreads
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Descriptor_get // get the contents of a descriptor
(
const GrB_Descriptor desc, // descriptor to query, may be NULL
bool *C_replace, // if true replace C before C<M>=Z
bool *Mask_comp, // if true use logical negation of M
bool *Mask_struct, // if true use the structure of M
bool *In0_transpose, // if true transpose first input
bool *In1_transpose, // if true transpose second input
GrB_Desc_Value *AxB_method, // method for C=A*B
GB_Context Context
) ;
GrB_Info GB_compatible // SUCCESS if all is OK, *_MISMATCH otherwise
(
const GrB_Type ctype, // the type of C (matrix or scalar)
const GrB_Matrix C, // the output matrix C; NULL if C is a scalar
const GrB_Matrix M, // optional mask, NULL if no mask
const GrB_BinaryOp accum, // C<M> = accum(C,T) is computed
const GrB_Type ttype, // type of T
GB_Context Context
) ;
GrB_Info GB_Mask_compatible // check type and dimensions of mask
(
const GrB_Matrix M, // mask to check
const GrB_Matrix C, // C<M>= ...
const GrB_Index nrows, // size of output if C is NULL (see GB*assign)
const GrB_Index ncols,
GB_Context Context
) ;
GrB_Info GB_BinaryOp_compatible // check for domain mismatch
(
const GrB_BinaryOp op, // binary operator to check
const GrB_Type ctype, // C must be compatible with op->ztype
const GrB_Type atype, // A must be compatible with op->xtype
const GrB_Type btype, // B must be compatible with op->ytype
const GB_Type_code bcode, // B may not have a type, just a code
GB_Context Context
) ;
// Several methods can choose between a qsort-based method that takes
// O(anz*log(anz)) time, or a bucket-sort method that takes O(anz+n) time.
// The qsort method is chosen if the following condition is true:
#define GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET(anz,n) ((16 * (anz)) < (n))
GB_PUBLIC // accessed by the MATLAB interface only
bool GB_Index_multiply // true if ok, false if overflow
(
GrB_Index *GB_RESTRICT c, // c = a*b, or zero if overflow occurs
const int64_t a,
const int64_t b
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
bool GB_size_t_multiply // true if ok, false if overflow
(
size_t *c, // c = a*b, or zero if overflow occurs
const size_t a,
const size_t b
) ;
bool GB_extract_vector_list // true if successful, false if out of memory
(
// output:
int64_t *GB_RESTRICT J, // size nnz(A) or more
// input:
const GrB_Matrix A,
int nthreads
) ;
GrB_Info GB_extractTuples // extract all tuples from a matrix
(
GrB_Index *I_out, // array for returning row indices of tuples
GrB_Index *J_out, // array for returning col indices of tuples
void *X, // array for returning values of tuples
GrB_Index *p_nvals, // I,J,X size on input; # tuples on output
const GB_Type_code xcode, // type of array X
const GrB_Matrix A, // matrix to extract tuples from
GB_Context Context
) ;
GrB_Info GB_Monoid_new // create a monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
const void *identity, // identity value
const void *terminal, // terminal value, if any (may be NULL)
const GB_Type_code idcode, // identity and terminal type code
GB_Context Context
) ;
//------------------------------------------------------------------------------
// GB_is_dense: check if a matrix is completely dense
//------------------------------------------------------------------------------
static inline bool GB_is_dense
(
const GrB_Matrix A
)
{
// check if A is completely dense: all entries present.
// zombies and pending tuples are not considered
// a NULL matrix is never dense
if (A == NULL) return (false) ;
// anzmax = vlen*vdim; GB_Index_multiply returns false on integer overflow,
// in which case the matrix cannot be dense
GrB_Index anzmax ;
bool ok = GB_Index_multiply (&anzmax, A->vlen, A->vdim) ;
return (ok && (anzmax == GB_NNZ (A))) ;
}
//------------------------------------------------------------------------------
// OpenMP definitions
//------------------------------------------------------------------------------
// GB_PART and GB_PARTITION: divide the index range 0:n-1 uniformly
// for nthreads.  GB_PART(tid,n,nthreads) is the first index for thread tid.
// The boundary is computed in double precision (presumably to avoid overflow
// of the intermediate product tid*n) and truncates when GB_PARTITION assigns
// it to the integers k1 and k2.
#define GB_PART(tid,n,nthreads) \
(((tid) * ((double) (n))) / ((double) (nthreads)))
// thread tid will operate on the range k1:(k2-1)
#define GB_PARTITION(k1,k2,n,tid,nthreads) \
k1 = ((tid) == 0 ) ? 0 : GB_PART ((tid), n, nthreads) ; \
k2 = ((tid) == (nthreads)-1) ? (n) : GB_PART ((tid)+1,n, nthreads)
// When OpenMP is not available the queries become compile-time constants,
// so callers need no #ifdef of their own.
#if defined ( _OPENMP )
#include <omp.h>
#define GB_OPENMP_MAX_THREADS omp_get_max_threads ( )
#define GB_OPENMP_GET_NUM_THREADS omp_get_num_threads ( )
#define GB_OPENMP_GET_WTIME omp_get_wtime ( )
#else
#define GB_OPENMP_MAX_THREADS (1)
#define GB_OPENMP_GET_NUM_THREADS (1)
#define GB_OPENMP_GET_WTIME (0)
#endif
// by default, give each thread at least 64K units of work to do
#define GB_CHUNK_DEFAULT (64*1024)
//------------------------------------------------------------------------------
GrB_Info GB_setElement // set a single entry, C(row,col) = scalar
(
GrB_Matrix C, // matrix to modify
void *scalar, // scalar to set
const GrB_Index row, // row index
const GrB_Index col, // column index
const GB_Type_code scalar_code, // type of the scalar
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_block // apply all pending computations if blocking mode enabled
(
GrB_Matrix A,
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
bool GB_op_is_second // return true if op is SECOND, of the right type
(
GrB_BinaryOp op,
GrB_Type type
) ;
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
char *GB_code_string // return a static string for a type name
(
const GB_Type_code code // code to convert to string
) ;
GrB_Info GB_resize // change the size of a matrix
(
GrB_Matrix A, // matrix to modify
const GrB_Index nrows_new, // new number of rows in matrix
const GrB_Index ncols_new, // new number of columns in matrix
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
int64_t GB_nvec_nonempty // return # of non-empty vectors
(
const GrB_Matrix A, // input matrix to examine
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_to_nonhyper // convert a matrix to non-hypersparse
(
GrB_Matrix A, // matrix to convert to non-hypersparse
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_to_hyper // convert a matrix to hypersparse
(
GrB_Matrix A, // matrix to convert to hypersparse
GB_Context Context
) ;
bool GB_to_nonhyper_test // test for conversion to hypersparse
(
GrB_Matrix A, // matrix to test
int64_t k, // # of non-empty vectors of A, an estimate is OK,
// but normally A->nvec_nonempty
int64_t vdim // normally A->vdim
) ;
bool GB_to_hyper_test // test for conversion to hypersparse
(
GrB_Matrix A, // matrix to test
int64_t k, // # of non-empty vectors of A, an estimate is OK,
// but normally A->nvec_nonempty
int64_t vdim // normally A->vdim
) ;
GrB_Info GB_to_hyper_conform // conform a matrix to its desired format
(
GrB_Matrix A, // matrix to conform
GB_Context Context
) ;
GrB_Info GB_hyper_prune
(
// output, not allocated on input:
int64_t *GB_RESTRICT *p_Ap, // size nvec+1
int64_t *GB_RESTRICT *p_Ah, // size nvec
int64_t *p_nvec, // # of vectors, all nonempty
// input, not modified
const int64_t *Ap_old, // size nvec_old+1
const int64_t *Ah_old, // size nvec_old
const int64_t nvec_old, // original number of vectors
GB_Context Context
) ;
GrB_Info GB_hypermatrix_prune
(
GrB_Matrix A, // matrix to prune
GB_Context Context
) ;
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_cast_array // typecast an array
(
GB_void *Cx, // output array
const GB_Type_code code1, // type code for Cx
GB_void *Ax, // input array
const GB_Type_code code2, // type code for Ax
const size_t user_size, // size of Ax and Cx if user-defined
const int64_t anz, // number of entries in Cx and Ax
const int nthreads // number of threads to use
) ;
//------------------------------------------------------------------------------
// boiler plate macros for checking inputs and returning if an error occurs
//------------------------------------------------------------------------------
// Functions use these macros to check/get their inputs and return an error
// if something has gone wrong.
// GB_OK: call a GraphBLAS method; on failure, free the caller's workspace
// via the caller-defined GB_FREE_ALL macro and return the error code.  The
// calling function must declare "GrB_Info info" and define GB_FREE_ALL
// before using GB_OK.
#define GB_OK(method) \
{ \
info = method ; \
if (info != GrB_SUCCESS) \
{ \
GB_FREE_ALL ; \
return (info) ; \
} \
}
// check if a required arg is NULL
// NOTE: each GB_RETURN_IF_* / GB_CONTEXT_RETURN_IF_* macro expands to a
// return statement, so it exits the calling function, not just a local scope.
#define GB_RETURN_IF_NULL(arg) \
if ((arg) == NULL) \
{ \
/* the required arg is NULL */ \
return (GB_ERROR (GrB_NULL_POINTER, (GB_LOG, \
"Required argument is null: [%s]", GB_STR(arg)))) ; \
}
// arg may be NULL, but if non-NULL then it must be initialized
// (magic == GB_MAGIC; GB_MAGIC2 marks an invalidated object)
#define GB_RETURN_IF_FAULTY(arg) \
if ((arg) != NULL && (arg)->magic != GB_MAGIC) \
{ \
if ((arg)->magic == GB_MAGIC2) \
{ \
/* optional arg is not NULL, but invalid */ \
return (GB_ERROR (GrB_INVALID_OBJECT, (GB_LOG, \
"Argument is invalid: [%s]", GB_STR(arg)))) ; \
} \
else \
{ \
/* optional arg is not NULL, but not initialized */ \
return (GB_ERROR (GrB_UNINITIALIZED_OBJECT, (GB_LOG, \
"Argument is uninitialized: [%s]", GB_STR(arg)))) ; \
} \
}
// arg must not be NULL, and it must be initialized
#define GB_RETURN_IF_NULL_OR_FAULTY(arg) \
GB_RETURN_IF_NULL (arg) ; \
GB_RETURN_IF_FAULTY (arg) ;
// same as GB_RETURN_IF_NULL(arg), but set Context first
#define GB_CONTEXT_RETURN_IF_NULL(arg) \
if ((arg) == NULL) \
{ \
/* the required arg is NULL */ \
GB_WHERE (GB_WHERE_STRING) ; \
return (GB_ERROR (GrB_NULL_POINTER, (GB_LOG, \
"Required argument is null: [%s]", GB_STR(arg)))) ; \
}
// same as GB_RETURN_IF_FAULTY(arg), but set Context first
#define GB_CONTEXT_RETURN_IF_FAULTY(arg) \
if ((arg) != NULL && (arg)->magic != GB_MAGIC) \
{ \
GB_WHERE (GB_WHERE_STRING) ; \
if ((arg)->magic == GB_MAGIC2) \
{ \
/* optional arg is not NULL, but invalid */ \
return (GB_ERROR (GrB_INVALID_OBJECT, (GB_LOG, \
"Argument is invalid: [%s]", GB_STR(arg)))) ; \
} \
else \
{ \
/* optional arg is not NULL, but not initialized */ \
return (GB_ERROR (GrB_UNINITIALIZED_OBJECT, (GB_LOG, \
"Argument is uninitialized: [%s]", GB_STR(arg)))) ; \
} \
}
// check the descriptor and extract its contents; also copies
// nthreads_max, chunk, and use_mkl from the descriptor to the Context
// NOTE: declares the locals info, dout, dmc, dms, d0, d1, and dalgo in the
// caller's scope, and returns from the caller on an invalid descriptor.
#define GB_GET_DESCRIPTOR(info,desc,dout,dmc,dms,d0,d1,dalgo) \
GrB_Info info ; \
bool dout, dmc, dms, d0, d1 ; \
GrB_Desc_Value dalgo ; \
/* if desc is NULL then defaults are used. This is OK */ \
info = GB_Descriptor_get (desc, &dout, &dmc, &dms, &d0, &d1, &dalgo, \
Context) ; \
if (info != GrB_SUCCESS) \
{ \
/* desc not NULL, but uninitialized or an invalid object */ \
return (info) ; \
}
// C<M>=Z ignores Z if an empty mask is complemented, so return from
// the method without computing anything. But do apply the mask.
#define GB_RETURN_IF_QUICK_MASK(C, C_replace, M, Mask_comp) \
if (Mask_comp && M == NULL) \
{ \
/* C<!NULL>=NULL since result does not depend on computing Z */ \
return (C_replace ? GB_clear (C, Context) : GrB_SUCCESS) ; \
}
// GB_MASK_VERY_SPARSE is true if C<M>=A+B or C<M>=accum(C,T) is being
// computed, and the mask M is very sparse compared with A and B.
// Heuristic: "very sparse" means nnz(M) < (nnz(A)+nnz(B))/8.
#define GB_MASK_VERY_SPARSE(M,A,B) (8 * GB_NNZ (M) < GB_NNZ (A) + GB_NNZ (B))
//------------------------------------------------------------------------------
// Pending updates and zombies
//------------------------------------------------------------------------------
// GB_FLIP is a kind of "negation" about (-1) of a zero-based index.
// If i >= 0 then it is not flipped.
// If i < 0 then it has been flipped.
// Like negation, GB_FLIP is its own inverse: GB_FLIP (GB_FLIP (i)) == i.
// The "nil" value, -1, doesn't change when flipped: GB_FLIP (-1) = -1.
// GB_UNFLIP(i) is like taking an absolute value, undoing any GB_FLIP(i).
// An entry A(i,j) in a matrix can be marked as a "zombie". A zombie is an
// entry that has been marked for deletion, but hasn't been deleted yet because
// it's more efficient to delete all zombies all at once, instead of one at a
// time. Zombies are created by submatrix assignment, C(I,J)=A which copies
// not only new entries into C, but it also deletes entries already present in
// C. If an entry appears in A but not C(I,J), it is a new entry; new entries
// are placed in the pending tuple lists to be added later.  If an entry appears in
// C(I,J) but NOT in A, then it is marked for deletion by flipping its row
// index, marking it as a zombie.
// Zombies can be restored as regular entries by GrB_*assign. If an assignment
// C(I,J)=A finds an entry in A that is a zombie in C, the zombie becomes a
// regular entry, taking on the value from A. The row index is unflipped.
// Zombies are deleted and pending tuples are added into the matrix all at
// once, by GB_Matrix_wait.
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Matrix_wait // finish all pending computations
(
GrB_Matrix A, // matrix with pending computations
GB_Context Context
) ;
// GB_FLIP is its own inverse (GB_FLIP(GB_FLIP(i)) == i) and maps the "nil"
// index -1 to itself, as described above.
#define GB_FLIP(i) (-(i)-2)
#define GB_IS_FLIPPED(i) ((i) < 0)
#define GB_IS_ZOMBIE(i) ((i) < 0)
#define GB_IS_NOT_FLIPPED(i) ((i) >= 0)
#define GB_IS_NOT_ZOMBIE(i) ((i) >= 0)
// GB_UNFLIP recovers the original index whether or not it has been flipped
#define GB_UNFLIP(i) (((i) < 0) ? GB_FLIP(i) : (i))
// true if a matrix has pending tuples
#define GB_PENDING(A) ((A) != NULL && (A)->Pending != NULL)
// true if a matrix is allowed to have pending tuples
// (always true by construction: pending tuples are always permitted;
// presumably kept so assertions read uniformly)
#define GB_PENDING_OK(A) (GB_PENDING (A) || !GB_PENDING (A))
// true if a matrix has zombies
#define GB_ZOMBIES(A) ((A) != NULL && (A)->nzombies > 0)
// true if a matrix is allowed to have zombies
#define GB_ZOMBIES_OK(A) (((A) == NULL) || ((A) != NULL && (A)->nzombies >= 0))
// true if a matrix has pending tuples or zombies
#define GB_PENDING_OR_ZOMBIES(A) (GB_PENDING (A) || GB_ZOMBIES (A))
// do all pending updates: delete zombies and assemble any pending tuples
// NOTE: uses GB_OK, so the caller needs "GrB_Info info" and GB_FREE_ALL,
// and may return early on error.
#define GB_MATRIX_WAIT(A) \
{ \
if (GB_PENDING_OR_ZOMBIES (A)) \
{ \
GB_OK (GB_Matrix_wait ((GrB_Matrix) A, Context)) ; \
ASSERT (!GB_ZOMBIES (A)) ; \
ASSERT (!GB_PENDING (A)) ; \
} \
}
#define GB_VECTOR_WAIT(v) GB_MATRIX_WAIT (v)
#define GB_SCALAR_WAIT(s) GB_MATRIX_WAIT (s)
// do all pending updates: but only if pending tuples; zombies are OK
#define GB_MATRIX_WAIT_PENDING(A) \
{ \
if (GB_PENDING (A)) \
{ \
/* do all pending work: delete zombies and assemble pending tuples */ \
GB_OK (GB_Matrix_wait ((GrB_Matrix) A, Context)) ; \
ASSERT (!GB_ZOMBIES (A)) ; \
ASSERT (!GB_PENDING (A)) ; \
} \
ASSERT (GB_ZOMBIES_OK (A)) ; \
}
// true if a matrix has no entries; zombies OK
#define GB_EMPTY(A) ((GB_NNZ (A) == 0) && !GB_PENDING (A))
//------------------------------------------------------------------------------
// built-in unary and binary operators
//------------------------------------------------------------------------------
#define GB_TYPE bool
#define GB_REAL
#define GB_BOOLEAN
#define GB(x) GB_ ## x ## _BOOL
#define GB_BITS 1
#include "GB_ops_template.h"
#define GB_TYPE int8_t
#define GB_REAL
#define GB_SIGNED_INT
#define GB(x) GB_ ## x ## _INT8
#define GB_BITS 8
#include "GB_ops_template.h"
#define GB_TYPE uint8_t
#define GB_REAL
#define GB_UNSIGNED_INT
#define GB(x) GB_ ## x ## _UINT8
#define GB_BITS 8
#include "GB_ops_template.h"
#define GB_TYPE int16_t
#define GB_REAL
#define GB_SIGNED_INT
#define GB(x) GB_ ## x ## _INT16
#define GB_BITS 16
#include "GB_ops_template.h"
#define GB_TYPE uint16_t
#define GB_REAL
#define GB_UNSIGNED_INT
#define GB(x) GB_ ## x ## _UINT16
#define GB_BITS 16
#include "GB_ops_template.h"
#define GB_TYPE int32_t
#define GB_REAL
#define GB_SIGNED_INT
#define GB(x) GB_ ## x ## _INT32
#define GB_BITS 32
#include "GB_ops_template.h"
#define GB_TYPE uint32_t
#define GB_REAL
#define GB_UNSIGNED_INT
#define GB(x) GB_ ## x ## _UINT32
#define GB_BITS 32
#include "GB_ops_template.h"
#define GB_TYPE int64_t
#define GB_REAL
#define GB_SIGNED_INT
#define GB(x) GB_ ## x ## _INT64
#define GB_BITS 64
#include "GB_ops_template.h"
#define GB_TYPE uint64_t
#define GB_REAL
#define GB_UNSIGNED_INT
#define GB(x) GB_ ## x ## _UINT64
#define GB_BITS 64
#include "GB_ops_template.h"
#define GB_TYPE float
#define GB_REAL
#define GB_FLOATING_POINT
#define GB_FLOAT
#define GB(x) GB_ ## x ## _FP32
#define GB_BITS 32
#include "GB_ops_template.h"
#define GB_TYPE double
#define GB_REAL
#define GB_FLOATING_POINT
#define GB_DOUBLE
#define GB(x) GB_ ## x ## _FP64
#define GB_BITS 64
#include "GB_ops_template.h"
#define GB_TYPE GxB_FC32_t
#define GB_COMPLEX
#define GB_FLOATING_POINT
#define GB_FLOAT_COMPLEX
#define GB(x) GB_ ## x ## _FC32
#define GB_BITS 64
#include "GB_ops_template.h"
#define GB_TYPE GxB_FC64_t
#define GB_COMPLEX
#define GB_FLOATING_POINT
#define GB_DOUBLE_COMPLEX
#define GB(x) GB_ ## x ## _FC64
#define GB_BITS 128
#include "GB_ops_template.h"
#define GB_opaque_GrB_LNOT GB_opaque_GxB_LNOT_BOOL
#define GB_opaque_GrB_LOR GB_opaque_GxB_LOR_BOOL
#define GB_opaque_GrB_LAND GB_opaque_GxB_LAND_BOOL
#define GB_opaque_GrB_LXOR GB_opaque_GxB_LXOR_BOOL
#define GB_opaque_GrB_LXNOR GB_opaque_GxB_LXNOR_BOOL
//------------------------------------------------------------------------------
// CUDA (DRAFT: in progress)
//------------------------------------------------------------------------------
#include "GB_cuda_gateway.h"
#endif
|
dem_structures_coupling_utilities.h | /*
* Author: Miguel Angel Celigueta
*
* maceli@cimne.upc.edu
*/
#ifndef KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H
#define KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H
// /* External includes */
// System includes
// Project includes
#include "includes/variables.h"
/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "custom_conditions/RigidFace.h"
#include "custom_conditions/RigidEdge.h"
#include "DEM_application_variables.h"
#include "dem_structures_coupling_application_variables.h"
#include "custom_elements/spheric_continuum_particle.h"
namespace Kratos
{
class DemStructuresCouplingUtilities
{
public:
typedef ModelPart::NodesContainerType::ContainerType::iterator NodesIteratorType;
KRATOS_CLASS_POINTER_DEFINITION(DemStructuresCouplingUtilities);
/// Default constructor
DemStructuresCouplingUtilities(){}
/// Destructor
virtual ~DemStructuresCouplingUtilities(){}
//***************************************************************************************************************
//***************************************************************************************************************
void TransferStructuresSkinToDem(ModelPart& r_source_model_part, ModelPart& r_destination_model_part, Properties::Pointer props) {
    // Copies the structural skin into the DEM walls model part: every source
    // condition becomes a sticky rigid DEM wall condition (edge in 2D, face
    // in 3D) sharing the source geometry, and all source nodes are added.
    const std::string missing_variable = CheckProvidedProperties(props);
    const int dimension = r_source_model_part.GetProcessInfo()[DOMAIN_SIZE];
    if (missing_variable != "all_ok") KRATOS_ERROR << "The Dem Walls ModelPart has no valid Properties. Missing " << missing_variable << " . Exiting." << std::endl;

    // Continue numbering after the highest existing destination condition Id.
    r_destination_model_part.Conditions().Sort();
    int next_id = 1;
    if (r_destination_model_part.Conditions().size()) next_id = (r_destination_model_part.ConditionsEnd() - 1)->Id() + 1;

    // Create one DEM wall condition per source condition.
    //TODO: add all of them in a single sentence! AddConditions. Use a temporary PointerVector as a list (not std::vector!).
    const unsigned int number_of_source_conditions = r_source_model_part.Conditions().size();
    for (unsigned int i = 0; i < number_of_source_conditions; i++) {
        ModelPart::ConditionsContainerType::iterator cond_it = r_source_model_part.ConditionsBegin() + i;
        Geometry< Node<3> >::Pointer p_geometry = cond_it->pGetGeometry();
        Condition::Pointer p_new_condition;
        if (dimension == 2) {
            p_new_condition = Condition::Pointer(new RigidEdge2D(next_id, p_geometry, props));
        } else {
            p_new_condition = Condition::Pointer(new RigidFace3D(next_id, p_geometry, props));
        }
        p_new_condition->Set(DEMFlags::STICKY, true);
        r_destination_model_part.AddCondition(p_new_condition);
        ++next_id;
    }

    // Share the skin nodes with the destination model part.
    r_destination_model_part.AddNodes(r_source_model_part.NodesBegin(), r_source_model_part.NodesEnd());
}
std::string CheckProvidedProperties(Properties::Pointer props) {
    // Verifies that the wall Properties define every variable the DEM walls
    // need.  Returns "all_ok" on success, or the name of the first missing
    // variable otherwise.
    const std::vector<const Variable<double>* > required_double_variables = {&FRICTION, &WALL_COHESION, &SEVERITY_OF_WEAR, &IMPACT_WEAR_SEVERITY, &BRINELL_HARDNESS, &YOUNG_MODULUS, &POISSON_RATIO};
    const std::vector<const Variable<bool>* > required_bool_variables = {&COMPUTE_WEAR};
    for (const auto* p_variable : required_double_variables) {
        if (!props->Has(*p_variable)) return p_variable->Name();
    }
    for (const auto* p_variable : required_bool_variables) {
        if (!props->Has(*p_variable)) return p_variable->Name();
    }
    return "all_ok";
}
void SmoothLoadTrasferredToFem(ModelPart& r_model_part, const double portion_of_the_force_which_is_new) {
    // Temporal smoothing of the DEM surface load transferred to the FEM side:
    // blends the freshly computed load (current step) with the value stored
    // at the previous step, node by node, in parallel.
    #pragma omp parallel for
    for (int i = 0; i < (int)r_model_part.Nodes().size(); i++) {
        auto node_it = r_model_part.NodesBegin() + i;
        array_1d<double, 3>& r_current_load = node_it->FastGetSolutionStepValue(DEM_SURFACE_LOAD);
        const array_1d<double, 3>& r_previous_load = node_it->FastGetSolutionStepValue(DEM_SURFACE_LOAD, 1);
        // Blend into a temporary first: noalias() requires that the target
        // does not appear on the right-hand side.
        array_1d<double, 3> blended_load;
        noalias(blended_load) = portion_of_the_force_which_is_new * r_current_load + (1.0 - portion_of_the_force_which_is_new) * r_previous_load;
        noalias(r_current_load) = blended_load;
    }
}
void ComputeSandProduction(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) {
    // Appends one line per call to "sand_production_graph.txt" containing:
    // time, applied face pressure (psi), cumulative detached mass (grams).
    const std::string sand_prod_filename = "sand_production_graph.txt";
    static std::ofstream ofs_sand_prod_file;
    static bool first_time_entered = true;
    if (first_time_entered) {
        // Start from a clean file on the first call of the simulation.
        ofs_sand_prod_file.open(sand_prod_filename, std::ofstream::out | std::ofstream::trunc);
        first_time_entered = false;
    }
    // Sum the mass of all particles still attached (non-ISOLATED).
    ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements();
    double current_total_mass_in_grams = 0.0;
    for (unsigned int k = 0; k < pElements.size(); k++) {
        ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
        Element* raw_p_element = &(*it);
        SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element);
        if (p_sphere->Is(ISOLATED)) continue; // detached particles no longer count
        const double particle_density = p_sphere->GetDensity();
        const double particle_volume = p_sphere->CalculateVolume();
        current_total_mass_in_grams += particle_volume * particle_density * 1.0e3; // mass in grams, assuming SI units
    }
    // Reference mass is the total captured on the first call; the produced
    // sand is whatever has detached since then.
    static const double initial_total_mass_in_grams = current_total_mass_in_grams;
    const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams;
    ProcessInfo& r_process_info = dem_model_part.GetProcessInfo();
    const double Pascals_to_psi_factor = 0.000145;
    const double face_pressure_in_psi = fabs(r_process_info[TARGET_STRESS_Z]) * Pascals_to_psi_factor;
    // BUG FIX: write through the single stream opened above.  The previous
    // code opened a second, append-mode static std::ofstream on the same
    // file while ofs_sand_prod_file was never used, leaving two live streams
    // with independent buffers on one file.
    ofs_sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n';
    ofs_sand_prod_file.flush();
}
void MarkBrokenSpheres(ModelPart& dem_model_part) {
    // Flags as ISOLATED every particle whose initial continuum bonds have all
    // failed (a particle with no initial bonds is also flagged).  Particles
    // already flagged are skipped.
    ModelPart::ElementsContainerType& r_elements = dem_model_part.GetCommunicator().LocalMesh().Elements();
    for (unsigned int k = 0; k < r_elements.size(); k++) {
        ModelPart::ElementsContainerType::iterator elem_it = r_elements.ptr_begin() + k;
        Element* p_raw_element = &(*elem_it);
        SphericContinuumParticle* p_particle = dynamic_cast<SphericContinuumParticle*>(p_raw_element);
        if (p_particle->Is(ISOLATED)) continue;
        bool all_initial_bonds_failed = true;
        for (unsigned int i = 0; i < p_particle->mContinuumInitialNeighborsSize; i++) {
            if (!p_particle->mIniNeighbourFailureId[i]) { // bond i is still intact
                all_initial_bonds_failed = false;
                break;
            }
        }
        if (all_initial_bonds_failed) p_particle->Set(ISOLATED, true);
    }
}
void ComputeSandProductionWithDepthFirstSearchNonRecursiveImplementation(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) {
    // Measures sand production as the mass lost by the largest connected
    // chunk of bonded particles.  Connectivity follows unbroken initial
    // continuum bonds and is computed with an iterative (explicit-stack)
    // depth-first search.  Appends one line per call to the sand production
    // file and, periodically, the chunk-mass distribution (granulometry).
    const std::string sand_prod_filename = "sand_production_graph_with_chunks_non_recursive.txt";
    static std::ofstream ofs_sand_prod_file;
    const std::string granulometry_distr_filename = "granulometry_distribution.txt";
    static std::ofstream ofs_granulometry_distr_file;
    static bool first_time_entered = true;
    if (first_time_entered) {
        // Start both files from scratch on the first call of the simulation.
        ofs_sand_prod_file.open(sand_prod_filename, std::ofstream::out | std::ofstream::trunc);
        ofs_granulometry_distr_file.open(granulometry_distr_filename, std::ofstream::out | std::ofstream::trunc);
        first_time_entered = false;
    }
    ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements();
    std::vector<double> chunks_masses;
    // Reset the traversal flag before starting the search.
    for (unsigned int k = 0; k < pElements.size(); k++) {
        ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
        it->Set(VISITED, false);
    }
    std::vector<SphericContinuumParticle*> stack_of_particles_to_check;
    for (unsigned int k = 0; k < pElements.size(); k++) {
        ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
        Element* raw_p_element = &(*it);
        SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element);
        double this_chunk_mass = 0.0;
        stack_of_particles_to_check.push_back(p_sphere);
        while (stack_of_particles_to_check.size()) {
            SphericContinuumParticle* current_particle = stack_of_particles_to_check.back();
            stack_of_particles_to_check.pop_back();
            if (current_particle->Is(VISITED)) continue;
            const double particle_density = current_particle->GetDensity();
            const double particle_volume = current_particle->CalculateVolume();
            this_chunk_mass += particle_volume * particle_density * 1.0e3; // mass in grams, assuming SI units
            current_particle->Set(VISITED, true);
            for (size_t i = 0; i < current_particle->mContinuumInitialNeighborsSize; i++) {
                SphericParticle* p_neighbour_sphere = current_particle->mNeighbourElements[i];
                if (p_neighbour_sphere == NULL) continue;
                if (p_neighbour_sphere->Is(VISITED)) continue; //not necessary, but saves increasing and decreasing stack_of_particles_to_check's size
                if (current_particle->mIniNeighbourFailureId[i]) continue; // broken bond: not part of this chunk
                // Skip neighbours that are no longer part of the model part.
                auto existing_element_it = dem_model_part.GetMesh(0).Elements().find(p_neighbour_sphere->Id());
                if (existing_element_it == dem_model_part.GetMesh(0).ElementsEnd()) continue;
                SphericContinuumParticle* p_neigh_cont_sphere = dynamic_cast<SphericContinuumParticle*>(p_neighbour_sphere);
                stack_of_particles_to_check.push_back(p_neigh_cont_sphere);
            }
        }
        if (this_chunk_mass) chunks_masses.push_back(this_chunk_mass);
    }
    // BUG FIX: guard against an empty element set.  Dereferencing
    // std::max_element of an empty range is undefined behaviour.
    const double max_mass_of_a_single_chunck = chunks_masses.empty() ? 0.0 :
        *std::max_element(chunks_masses.begin(), chunks_masses.end());
    const double current_total_mass_in_grams = max_mass_of_a_single_chunck;
    // Reference mass is the largest chunk found on the first call.
    static const double initial_total_mass_in_grams = current_total_mass_in_grams;
    const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams;
    ProcessInfo& r_process_info = dem_model_part.GetProcessInfo();
    const double Pascals_to_psi_factor = 0.000145;
    const double face_pressure_in_psi = fabs(r_process_info[TARGET_STRESS_Z]) * Pascals_to_psi_factor;
    ofs_sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n';
    ofs_sand_prod_file.flush();
    // Granulometry is printed only every N calls; N is currently so large
    // that the print is effectively disabled.
    unsigned int number_of_time_steps_between_granulometry_prints = 1e9;
    static unsigned int printing_counter = 0;
    if (printing_counter == number_of_time_steps_between_granulometry_prints) {
        ofs_granulometry_distr_file << time;
        for (unsigned int k = 0; k < chunks_masses.size(); k++) ofs_granulometry_distr_file << " " << chunks_masses[k];
        ofs_granulometry_distr_file << '\n';
        printing_counter = 0;
    }
    printing_counter++;
    ofs_granulometry_distr_file.flush();
}
void ComputeSandProductionWithDepthFirstSearch(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) {
    // Same measurement as the non-recursive variant above, but connected
    // chunks are gathered with the recursive helper DepthFirstSearchVisit.
    const std::string filename = "sand_production_graph_with_chunks.txt";
    std::ifstream ifile(filename.c_str());
    static bool first_time_entered = true;
    if ((bool) ifile && first_time_entered) {
        // Remove stale results from a previous run on the first call.
        std::remove(filename.c_str());
        first_time_entered = false;
    }
    ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements();
    std::vector<double> chunks_masses;
    // Reset the traversal flag before starting the search.
    for (unsigned int k = 0; k < pElements.size(); k++) {
        ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
        it->Set(VISITED, false);
    }
    for (unsigned int k = 0; k < pElements.size(); k++) {
        ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
        Element* raw_p_element = &(*it);
        SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element);
        double this_chunk_mass = 0.0;
        if (it->IsNot(VISITED)) {
            DepthFirstSearchVisit(p_sphere, this_chunk_mass);
            chunks_masses.push_back(this_chunk_mass);
        }
    }
    // BUG FIX: guard against an empty element set.  Dereferencing
    // std::max_element of an empty range is undefined behaviour.
    const double max_mass_of_a_single_chunck = chunks_masses.empty() ? 0.0 :
        *std::max_element(chunks_masses.begin(), chunks_masses.end());
    const double current_total_mass_in_grams = max_mass_of_a_single_chunck;
    // Reference mass is the largest chunk found on the first call.
    static const double initial_total_mass_in_grams = current_total_mass_in_grams;
    const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams;
    ModelPart::ConditionsContainerType::iterator condition_begin = outer_walls_model_part.ConditionsBegin();
    const double Pascals_to_psi_factor = 0.000145;
    const double face_pressure_in_psi = condition_begin->GetValue(POSITIVE_FACE_PRESSURE) * Pascals_to_psi_factor;
    static std::ofstream sand_prod_file(filename, std::ios_base::out | std::ios_base::app);
    sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n';
    sand_prod_file.flush();
}
// Accumulates into this_chunk_mass the mass (in grams) of the connected
// component of bonded particles reachable from p_sphere, marking every
// visited particle with the VISITED flag.
//
// Implemented iteratively with an explicit stack: the previous recursive
// formulation could overflow the call stack on very large chunks. The set
// of visited particles and the accumulated mass are identical.
void DepthFirstSearchVisit(SphericContinuumParticle* p_sphere, double& this_chunk_mass) {
    std::vector<SphericContinuumParticle*> stack;
    p_sphere->Set(VISITED, true);
    stack.push_back(p_sphere);

    while (!stack.empty()) {
        SphericContinuumParticle* p_current = stack.back();
        stack.pop_back();

        const double particle_radius = p_current->GetRadius();
        const double particle_density = p_current->GetDensity();
        // Sphere mass in grams (density [kg/m^3] * volume [m^3] * 1000).
        this_chunk_mass += (4.0/3.0) * Globals::Pi * particle_density * particle_radius * particle_radius * particle_radius * 1000.0;

        for (size_t i = 0; i < p_current->mContinuumInitialNeighborsSize; i++) {
            SphericParticle* p_neighbour_sphere = p_current->mNeighbourElements[i];
            if (p_neighbour_sphere == NULL) continue;           // neighbour no longer exists
            if (p_current->mIniNeighbourFailureId[i]) continue; // bond is broken: not part of this chunk
            if (p_neighbour_sphere->IsNot(VISITED)) {
                SphericContinuumParticle* p_neigh_cont_sphere = dynamic_cast<SphericContinuumParticle*>(p_neighbour_sphere);
                // Mark on push so each particle is stacked at most once.
                p_neigh_cont_sphere->Set(VISITED, true);
                stack.push_back(p_neigh_cont_sphere);
            }
        }
    }
}
// Appends one line per call to "sand_production_graph.txt":
//   time  face_pressure[psi]  cumulative_sand_mass[g]
// for a triaxial test. The sand mass is the initial total particle mass
// minus the current mass of all non-ISOLATED particles; the pressure is the
// mean of the two lateral wall pressures and a constant axial stress.
void ComputeTriaxialSandProduction(ModelPart& dem_model_part, ModelPart& outer_walls_model_part_1, ModelPart& outer_walls_model_part_2, const double time) {
    const std::string filename = "sand_production_graph.txt";
    std::ifstream ifile(filename.c_str());
    static bool first_time_entered = true;
    // Remove a stale file left over from a previous run (first call only).
    if ((bool) ifile && first_time_entered) {
        std::remove(filename.c_str());
        first_time_entered = false;
    }
    ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements();
    double current_total_mass_in_grams = 0.0;
    for (unsigned int k = 0; k < pElements.size(); k++) {
        ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k;
        Element* raw_p_element = &(*it);
        SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element);
        // ISOLATED particles count as produced sand and are excluded.
        if (p_sphere->Is(ISOLATED)) continue;
        const double particle_radius = p_sphere->GetRadius();
        const double particle_density = p_sphere->GetDensity();
        // Sphere mass in grams (density [kg/m^3] * volume [m^3] * 1000).
        current_total_mass_in_grams += (4.0/3.0) * Globals::Pi * particle_density * particle_radius * particle_radius * particle_radius * 1000.0;
    }
    // Reference mass is captured on the first call of the run.
    static const double initial_total_mass_in_grams = current_total_mass_in_grams;
    const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams;
    ModelPart::ConditionsContainerType::iterator condition_begin_1 = outer_walls_model_part_1.ConditionsBegin();
    ModelPart::ConditionsContainerType::iterator condition_begin_2 = outer_walls_model_part_2.ConditionsBegin();
    const double Pascals_to_psi_factor = 0.000145;
    // Mean of the three principal stresses, converted to psi.
    const double face_pressure_in_psi = (condition_begin_1->GetValue(POSITIVE_FACE_PRESSURE) +
                                         condition_begin_2->GetValue(POSITIVE_FACE_PRESSURE) +
                                         3.45e6) * Pascals_to_psi_factor * 0.33333333333333; // 3.45e6 is the sigma_z constant pressure
    static std::ofstream sand_prod_file(filename, std::ios_base::out | std::ios_base::app);
    sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n';
    sand_prod_file.flush();
}
//***************************************************************************************************************
//***************************************************************************************************************
/// Turn back information as a string.
virtual std::string Info() const
{
    // Intentionally empty; derived utilities may override with a description.
    return "";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
    // No-op by default.
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{
    // No-op by default.
}
protected:
private:
/// Assignment operator
DemStructuresCouplingUtilities & operator=(DemStructuresCouplingUtilities const& rOther);
///@}
}; // Class DemStructuresCouplingUtilities
} // namespace Python.
#endif // KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H
|
perturbation_fold.c | #ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef VRNA_WITH_GSL
#include <gsl/gsl_multimin.h>
#endif
#include "ViennaRNA/eval.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/part_func.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/perturbation_fold.h"
/*
 * Fill probability[1..length] with the probability that each sequence
 * position is unpaired, i.e. 1 minus the sum of all base-pair
 * probabilities involving that position (read from the pf matrices of vc).
 * Entry 0 is set to 1 and otherwise unused; positions are 1-based.
 */
static void
calculate_probability_unpaired(vrna_fold_compound_t *vc,
                               double               *probability)
{
  int         pos, partner;
  int         n       = vc->length;
  FLT_OR_DBL  *probs  = vc->exp_matrices->probs;
  int         *iidx   = vc->iindx;

  for (pos = 0; pos <= n; ++pos)
    probability[pos] = 1;

  /* every pair (pos, partner) lowers the unpaired probability of both ends */
  for (pos = 1; pos <= n; ++pos)
    for (partner = pos + 1; partner <= n; ++partner) {
      FLT_OR_DBL p = probs[iidx[pos] - partner];
      probability[pos]      -= p;
      probability[partner]  -= p;
    }
}
#if 0
/* Euclidean norm of vector[1..length]; currently compiled out (only used
 * by the commented-out step-size heuristic in the gradient descent). */
static double
calculate_norm(double *vector,
               int length)
{
  double sum = 0;
  int i;
  for (i = 1; i <= length; ++i)
    sum += vector[i] * vector[i];
  return sqrt(sum);
}
#endif
/*
 * Attach soft constraints to the fold compound that impose per-position
 * pseudo energies epsilon[1..length] (kcal/mol) on unpaired positions.
 *
 * Both the Boltzmann-factor tables (exp_energy_up, used by the partition
 * function) and the integer dcal/mol tables (energy_up, used by MFE
 * computations) are filled; entry [i][j] covers a stretch of j unpaired
 * positions starting at position i.
 */
static void
addSoftConstraint(vrna_fold_compound_t *vc,
                  const double         *epsilon,
                  int                  length)
{
  vrna_sc_t *sc;
  int       i, j;
  double    kT = vc->exp_params->kT / 1000; /* kT in kcal/mol */

  sc = vrna_alloc(sizeof(vrna_sc_t));

  /* Boltzmann weights for stretches of unpaired positions */
  sc->exp_energy_up = vrna_alloc(sizeof(FLT_OR_DBL *) * (length + 2));
  /* fix: was vrna_alloc(1), which under-allocates the (unused) row 0 entry */
  sc->exp_energy_up[0] = vrna_alloc(sizeof(FLT_OR_DBL));
  for (i = 1; i <= length; ++i)
    sc->exp_energy_up[i] = vrna_alloc(sizeof(FLT_OR_DBL) * (length - i + 2));

  for (i = 1; i <= length; ++i) {
    sc->exp_energy_up[i][0] = 1;
    /* cumulative product: weight of j unpaired positions starting at i */
    for (j = 1; j <= length - i + 1; ++j)
      sc->exp_energy_up[i][j] = sc->exp_energy_up[i][j - 1] * exp(-(epsilon[i + j - 1]) / kT);
  }

  /* also add sc for MFE computation (energies in dcal/mol, hence * 100) */
  sc->energy_up = vrna_alloc(sizeof(int *) * (length + 2));
  sc->energy_up[0] = vrna_alloc(sizeof(int));
  for (i = 1; i <= length; ++i)
    sc->energy_up[i] = vrna_alloc(sizeof(int) * (length - i + 2));

  for (i = 1; i <= length; ++i) {
    sc->energy_up[i][0] = 0;
    /* cumulative sum: penalty of j unpaired positions starting at i */
    for (j = 1; j <= length - i + 1; ++j)
      sc->energy_up[i][j] = sc->energy_up[i][j - 1] + (epsilon[i + j - 1] * 100.);
  }

  vc->sc = sc;
}
/*
 * Penalty contribution of a single deviation `value` under the selected
 * objective function: squared error or absolute error.
 * Hits assert(0) on any unknown objective_function id.
 */
static double
evaluate_objective_function_contribution(double value,
                                         int    objective_function)
{
  switch (objective_function) {
    case VRNA_OBJECTIVE_FUNCTION_QUADRATIC:
      return value * value;
    case VRNA_OBJECTIVE_FUNCTION_ABSOLUTE:
      return fabs(value);
    default:
      assert(0);
      return 0;
  }
}
/*
 * Objective value of a perturbation vector epsilon:
 *   sum_i F(epsilon[i]) / tau^2
 * + sum_i F(p_unpaired[i] - q_prob_unpaired[i]) / sigma^2
 * where F is the quadratic or absolute objective and p_unpaired is the
 * unpaired probability under the epsilon-perturbed energy model.
 * Positions with q_prob_unpaired[i] < 0 (missing data) are skipped in the
 * discrepancy term. Soft constraints are removed again before returning.
 */
static double
evaluate_perturbation_vector_score(vrna_fold_compound_t *vc,
                                   const double         *epsilon,
                                   const double         *q_prob_unpaired,
                                   double               sigma_squared,
                                   double               tau_squared,
                                   int                  objective_function)
{
  double  ret   = 0;
  double  ret2  = 0.;
  double  *p_prob_unpaired;
  int     i;
  int     length = vc->length;

  /* calculate pairing probabilty in the pertubated energy model */
  p_prob_unpaired = vrna_alloc(sizeof(double) * (length + 1));

  addSoftConstraint(vc, epsilon, length);

  vc->exp_params->model_details.compute_bpp = 1;

  /* get new (constrained) MFE to scale pf computations properly */
  double mfe = (double)vrna_mfe(vc, NULL);

  vrna_exp_params_rescale(vc, &mfe);

  vrna_pf(vc, NULL);
  calculate_probability_unpaired(vc, p_prob_unpaired);

  vrna_sc_remove(vc);

  for (i = 1; i <= length; ++i) {
    /* add penalty for pertubation energies */
    ret += evaluate_objective_function_contribution(epsilon[i], objective_function) / tau_squared;

    /* add penalty for mismatches between observed and predicted probabilities */
    if (q_prob_unpaired[i] >= 0) /* ignore positions with missing data */
      ret2 += evaluate_objective_function_contribution(p_prob_unpaired[i] - q_prob_unpaired[i],
                                                       objective_function) / sigma_squared;
  }

  vrna_message_info(stderr, "Score: pertubation: %g\tdiscrepancy: %g", ret, ret2);

  free(p_prob_unpaired);

  return ret + ret2;
}
/*
 * Exact computation of unpaired probabilities under the perturbed model.
 * Output:
 *   prob_unpaired[i]                - P(position i unpaired)
 *   conditional_prob_unpaired[i][j] - P(position j unpaired | i unpaired)
 * Conditional values are obtained by re-running the partition function once
 * per position i with a hard constraint ('x') forcing i to stay unpaired.
 */
static void
pairing_probabilities_from_restricted_pf(vrna_fold_compound_t *vc,
                                         const double         *epsilon,
                                         double               *prob_unpaired,
                                         double               **conditional_prob_unpaired)
{
  int length = vc->length;
  int i;

  addSoftConstraint(vc, epsilon, length);

  vc->exp_params->model_details.compute_bpp = 1;

  /* get new (constrained) MFE to scale pf computations properly */
  double mfe = (double)vrna_mfe(vc, NULL);

  vrna_exp_params_rescale(vc, &mfe);

  vrna_pf(vc, NULL);
  calculate_probability_unpaired(vc, prob_unpaired);

#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
  for (i = 1; i <= length; ++i) {
    vrna_fold_compound_t  *restricted_vc;
    char                  *hc_string;
    unsigned int          constraint_options = VRNA_CONSTRAINT_DB
                                               | VRNA_CONSTRAINT_DB_PIPE
                                               | VRNA_CONSTRAINT_DB_DOT
                                               | VRNA_CONSTRAINT_DB_X
                                               | VRNA_CONSTRAINT_DB_ANG_BRACK
                                               | VRNA_CONSTRAINT_DB_RND_BRACK;

    /* constraint string: '.' everywhere, 'x' (forced unpaired) at position i */
    hc_string = vrna_alloc(sizeof(char) * (length + 1));
    memset(hc_string, '.', length);
    hc_string[i - 1] = 'x';

    restricted_vc = vrna_fold_compound(vc->sequence,
                                       &(vc->exp_params->model_details),
                                       VRNA_OPTION_PF);
    vrna_constraints_add(restricted_vc, hc_string, constraint_options);
    free(hc_string);

    /* NOTE(review): the shared vc->exp_params is substituted into every
     * thread's compound concurrently — confirm this is read-only/safe. */
    vrna_exp_params_subst(restricted_vc, vc->exp_params);
    vrna_pf(restricted_vc, NULL);
    calculate_probability_unpaired(restricted_vc, conditional_prob_unpaired[i]);

    /* detach sc before freeing — presumably defensive, so the compound's
     * destructor does not free constraint memory owned elsewhere; verify */
    restricted_vc->sc = NULL;
    vrna_fold_compound_free(restricted_vc);
  }

  vrna_sc_remove(vc);
}
/*
 * Monte-Carlo estimate of unpaired probabilities under the perturbed model.
 * sample_size secondary structures are drawn by stochastic backtracking and
 * unpaired counts are accumulated, then normalized:
 *   prob_unpaired[i]                ~ P(i unpaired)
 *   conditional_prob_unpaired[i][j] ~ P(j unpaired | i unpaired)
 *
 * NOTE(review): the sampling loop is OpenMP-parallel, but vrna_pbacktrack()
 * is invoked on the shared vc outside the critical section — confirm it is
 * thread-safe in this build. The counters and the shared indices i, j are
 * only touched inside the critical section, hence effectively serialized.
 */
static void
pairing_probabilities_from_sampling(vrna_fold_compound_t  *vc,
                                    const double          *epsilon,
                                    int                   sample_size,
                                    double                *prob_unpaired,
                                    double                **conditional_prob_unpaired)
{
  int length = vc->length;
  int i, j, s;

  st_back = 1; /* is this really required? */

  addSoftConstraint(vc, epsilon, length);

  /* base-pair probabilities are not needed for sampling */
  vc->exp_params->model_details.compute_bpp = 0;

  /* get new (constrained) MFE to scale pf computations properly */
  double mfe = (double)vrna_mfe(vc, NULL);

  vrna_exp_params_rescale(vc, &mfe);

  vrna_pf(vc, NULL);

#ifdef _OPENMP
#pragma omp parallel for private(s)
#endif
  for (s = 0; s < sample_size; ++s) {
    char *sample = vrna_pbacktrack(vc);
#ifdef _OPENMP
#pragma omp critical
#endif
    {
      /* count unpaired positions ('.') and co-occurring unpaired pairs */
      for (i = 1; i <= length; ++i) {
        if (sample[i - 1] != '.')
          continue;

        ++prob_unpaired[i];

        for (j = 1; j <= length; ++j)
          if (sample[j - 1] == '.')
            ++conditional_prob_unpaired[i][j];
      }
    }
    free(sample);
  }

  /* normalize counts into (conditional) relative frequencies */
  for (i = 1; i <= length; ++i) {
    if (prob_unpaired[i])
      for (j = 1; j <= length; ++j)
        conditional_prob_unpaired[i][j] /= prob_unpaired[i];

    prob_unpaired[i] /= sample_size;

    assert(prob_unpaired[i] >= 0 && prob_unpaired[i] <= 1);
  }

  vrna_sc_remove(vc);
}
/*
 * Allocate (zero-initialized) the probability buffers used by the gradient
 * computation: a vector of length+1 doubles and a (length+1)-row matrix of
 * such vectors. Row 0 of the matrix stays unallocated (NULL) because all
 * positions are 1-based.
 */
static void
allocateProbabilityArrays(double **unpaired,
                          double ***conditional_unpaired,
                          int length)
{
  int row;

  *unpaired             = vrna_alloc(sizeof(double) * (length + 1));
  *conditional_unpaired = vrna_alloc(sizeof(double *) * (length + 1));

  for (row = 1; row <= length; ++row)
    (*conditional_unpaired)[row] = vrna_alloc(sizeof(double) * (length + 1));
}
static void
freeProbabilityArrays(double *unpaired,
double **conditional_unpaired,
int length)
{
int i;
free(unpaired);
for (i = 1; i <= length; ++i)
free(conditional_unpaired[i]);
free(conditional_unpaired);
}
/*
 * Analytic gradient of the objective with respect to epsilon, written to
 * gradient[1..length]. It uses the derivative of the unpaired probability
 *   d p_i / d eps_mu = p_i * (p_mu - P(mu unpaired | i unpaired)) / kT
 * with probabilities estimated either by sampling (sample_size > 0) or by
 * exact restricted partition functions (sample_size <= 0).
 */
static void
evaluate_perturbation_vector_gradient(vrna_fold_compound_t  *vc,
                                      const double          *epsilon,
                                      const double          *q_prob_unpaired,
                                      double                sigma_squared,
                                      double                tau_squared,
                                      int                   objective_function,
                                      int                   sample_size,
                                      double                *gradient)
{
  double  *p_prob_unpaired;
  double  **p_conditional_prob_unpaired;
  int     i, mu;
  int     length = vc->length;
  double  kT = vc->exp_params->kT / 1000; /* kT in kcal/mol */

  allocateProbabilityArrays(&p_prob_unpaired, &p_conditional_prob_unpaired, length);

  if (sample_size > 0) {
    pairing_probabilities_from_sampling(vc,
                                        epsilon,
                                        sample_size,
                                        p_prob_unpaired,
                                        p_conditional_prob_unpaired);
  } else {
    pairing_probabilities_from_restricted_pf(vc,
                                             epsilon,
                                             p_prob_unpaired,
                                             p_conditional_prob_unpaired);
  }

  for (mu = 1; mu <= length; ++mu) {
    double sum = 0;

    if (objective_function == VRNA_OBJECTIVE_FUNCTION_QUADRATIC) {
      for (i = 1; i <= length; ++i) {
        if (q_prob_unpaired[i] < 0) /* ignore positions with missing data */
          continue;

        sum += (p_prob_unpaired[i] - q_prob_unpaired[i])
               * p_prob_unpaired[i] * (p_prob_unpaired[mu] - p_conditional_prob_unpaired[i][mu])
               / sigma_squared;
      }

      gradient[mu] = 2 * (epsilon[mu] / tau_squared + sum / kT);
    } else if (objective_function == VRNA_OBJECTIVE_FUNCTION_ABSOLUTE) {
      /* subgradient of |.|: the sign of the deviation; 0 exactly at 0 */
      for (i = 1; i <= length; ++i)
        if (q_prob_unpaired[i] >= 0 && p_prob_unpaired[i] != q_prob_unpaired[i]) {
          sum += (p_prob_unpaired[i] * (p_prob_unpaired[mu] - p_conditional_prob_unpaired[i][mu])) /
                 kT
                 / sigma_squared
                 * (p_prob_unpaired[i] > q_prob_unpaired[i] ? 1. : -1.);
        }

      if (epsilon[mu])
        sum += (epsilon[mu] > 0 ? 1. : -1.) / tau_squared;

      gradient[mu] = sum;
    }
  }

  freeProbabilityArrays(p_prob_unpaired, p_conditional_prob_unpaired, length);
}
#ifdef VRNA_WITH_GSL
/* Parameter bundle threaded through the GSL multimin callbacks below. */
typedef struct parameters_gsl {
  vrna_fold_compound_t  *vc;
  const double          *q_prob_unpaired;
  double                sigma_squared;
  double                tau_squared;
  int                   objective_function;
  int                   sample_size;
} parameters_gsl;

/* GSL 'f' callback: objective value at x (x->data is the epsilon vector). */
static double
f_gsl(const gsl_vector *x,
      void             *params)
{
  parameters_gsl *p = params;

  return evaluate_perturbation_vector_score(p->vc,
                                            x->data,
                                            p->q_prob_unpaired,
                                            p->sigma_squared,
                                            p->tau_squared,
                                            p->objective_function);
}

/* GSL 'df' callback: gradient at x; element 0 is unused and pinned to 0. */
static void
df_gsl(const gsl_vector *x,
       void             *params,
       gsl_vector       *df)
{
  parameters_gsl *p = params;

  gsl_vector_set(df, 0, 0);
  evaluate_perturbation_vector_gradient(p->vc,
                                        x->data,
                                        p->q_prob_unpaired,
                                        p->sigma_squared,
                                        p->tau_squared,
                                        p->objective_function,
                                        p->sample_size,
                                        df->data);
}

/* GSL 'fdf' callback: value and gradient in one call, as GSL requires. */
static void
fdf_gsl(const gsl_vector  *x,
        void              *params,
        double            *f,
        gsl_vector        *g)
{
  *f = f_gsl(x, params);
  df_gsl(x, params, g);
}
#endif /* VRNA_WITH_GSL */
/*
 * Find a perturbation vector epsilon[1..length] that minimizes
 *   sum_i F(epsilon[i])/tau^2 + sum_i F(p_i(epsilon) - q_i)/sigma^2
 * so that the predicted unpaired probabilities match the observed ones in
 * q_prob_unpaired. If compiled with GSL support and `algorithm` names one
 * of the GSL fdf-minimizers, GSL drives the search; any other algorithm id
 * falls through to a hand-rolled gradient descent with step-size halving.
 * The result is written into epsilon (entry 0 unused); `callback`, when
 * non-NULL, is invoked with the current state after every iteration.
 */
PUBLIC void
vrna_sc_minimize_pertubation(vrna_fold_compound_t *vc,
                             const double         *q_prob_unpaired,
                             int                  objective_function,
                             double               sigma_squared,
                             double               tau_squared,
                             int                  algorithm,
                             int                  sample_size,
                             double               *epsilon,
                             double               initialStepSize,
                             double               minStepSize,
                             double               minImprovement,
                             double               minimizerTolerance,
                             progress_callback    callback)
{
  int       iteration_count = 0;
  const int max_iterations  = 100; /* hard cap for both code paths */
  int       length = vc->length;

#ifdef VRNA_WITH_GSL
  const gsl_multimin_fdfminimizer_type *minimizer_type = 0;
  /* table mapping VRNA minimizer ids to GSL minimizer implementations */
  struct {
    int                                   type;
    const gsl_multimin_fdfminimizer_type  *gsl_type;
  } algorithms[] =
  { { VRNA_MINIMIZER_CONJUGATE_FR,
      gsl_multimin_fdfminimizer_conjugate_fr },
    { VRNA_MINIMIZER_CONJUGATE_PR,
      gsl_multimin_fdfminimizer_conjugate_pr },
    { VRNA_MINIMIZER_VECTOR_BFGS,
      gsl_multimin_fdfminimizer_vector_bfgs },
    { VRNA_MINIMIZER_VECTOR_BFGS2,
      gsl_multimin_fdfminimizer_vector_bfgs2 },
    { VRNA_MINIMIZER_STEEPEST_DESCENT,
      gsl_multimin_fdfminimizer_steepest_descent },
    { 0,
      NULL } };

  int i;
  for (i = 0; algorithms[i].type; ++i)
    if (algorithms[i].type == algorithm) {
      minimizer_type = algorithms[i].gsl_type;
      break;
    }

  if (minimizer_type) {
    /* GSL-driven minimization */
    parameters_gsl            parameters;
    gsl_multimin_function_fdf fdf;
    gsl_multimin_fdfminimizer *minimizer;
    gsl_vector                *vector;
    int                       status;

    parameters.vc                 = vc;
    parameters.q_prob_unpaired    = q_prob_unpaired;
    parameters.sigma_squared      = sigma_squared;
    parameters.tau_squared        = tau_squared;
    parameters.objective_function = objective_function;
    parameters.sample_size        = sample_size;

    /* n = length + 1: index 0 is carried along but unused */
    fdf.n       = length + 1;
    fdf.f       = &f_gsl;
    fdf.df      = &df_gsl;
    fdf.fdf     = &fdf_gsl;
    fdf.params  = (void *)&parameters;

    minimizer = gsl_multimin_fdfminimizer_alloc(minimizer_type, length + 1);
    vector    = gsl_vector_calloc(length + 1); /* start from epsilon == 0 */

    /* gsl_multimin_fdfminimizer_set(minimizer, &fdf, vector, 0.01, 1e-4); */
    gsl_multimin_fdfminimizer_set(minimizer, &fdf, vector, initialStepSize, minimizerTolerance);

    if (callback)
      callback(0, minimizer->f, minimizer->x->data);

    do {
      ++iteration_count;
      status = gsl_multimin_fdfminimizer_iterate(minimizer);

      if (callback)
        callback(iteration_count, minimizer->f, minimizer->x->data);

      if (status)
        break;

      status = gsl_multimin_test_gradient(minimizer->gradient, minimizerTolerance);
    } while (status == GSL_CONTINUE && iteration_count < max_iterations);

    memcpy(epsilon, minimizer->x->data, sizeof(double) * (length + 1));

    gsl_multimin_fdfminimizer_free(minimizer);
    gsl_vector_free(vector);

    return;
  }
#endif /* VRNA_WITH_GSL */

  /* fallback: plain gradient descent with step-size halving line search */
  double        improvement;
  const double  min_improvement = minImprovement;

  double *new_epsilon = vrna_alloc(sizeof(double) * (length + 1));
  double *gradient    = vrna_alloc(sizeof(double) * (length + 1));

  double score = evaluate_perturbation_vector_score(vc,
                                                    epsilon,
                                                    q_prob_unpaired,
                                                    sigma_squared,
                                                    tau_squared,
                                                    objective_function);

  if (callback)
    callback(0, score, epsilon);

  do {
    double new_score;
    double step_size;

    ++iteration_count;

    evaluate_perturbation_vector_gradient(vc,
                                          epsilon,
                                          q_prob_unpaired,
                                          sigma_squared,
                                          tau_squared,
                                          objective_function,
                                          sample_size,
                                          gradient);

    /* step_size = 0.5 / calculate_norm(gradient, length);*/
    step_size = initialStepSize;

    /* halve the step until the score improves enough or the step is too small */
    do {
      int i;
      for (i = 1; i <= length; ++i)
        new_epsilon[i] = epsilon[i] - step_size * gradient[i];

      new_score = evaluate_perturbation_vector_score(vc,
                                                     new_epsilon,
                                                     q_prob_unpaired,
                                                     sigma_squared,
                                                     tau_squared,
                                                     objective_function);

      improvement = 1 - new_score / score;
      step_size /= 2;
    } while ((improvement < min_improvement) && (step_size >= minStepSize));

    if (new_score > score) /* no downhill step found — keep previous epsilon */
      break;

    if (callback)
      callback(iteration_count, new_score, new_epsilon);

    score = new_score;
    memcpy(epsilon, new_epsilon, sizeof(double) * (length + 1));
  } while (improvement >= min_improvement && iteration_count < max_iterations);

  free(gradient);
  free(new_epsilon);
}
|
test_taskargs.c | //===-- test_taskargs.c - Test task creation and argument passing *- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "omp.h"
// Regression test: checks that firstprivate variables are copied into an
// explicit task's argument block with their original values (42 and 84),
// even though the task may execute on a different thread than its creator.
// Prints ***PASSED***/***FAILED*** and returns a matching exit code.
int main(void) {
  int failed = 0;
#pragma omp parallel shared(failed)
  {
#pragma omp master
    {
      int tpvar = 42;
      int tpvar2 = 84;
      // The task captures tpvar/tpvar2 by value (firstprivate); the master
      // thread may leave this scope before the task runs.
#pragma omp task firstprivate(tpvar, tpvar2)
      {
        int me = omp_get_thread_num();
        fprintf(stderr, "In task in thread %d\n", me);
        fflush(stderr);
        fprintf(stderr, "%d: &tpvar = %p, &tpvar2 = %p\n", me, &tpvar, &tpvar2);
        fflush(stderr);
        fprintf(stderr,
                "%d: tpvar = %d (should be 42), tpvar2 = %d "
                "(should be 84)\n",
                me, tpvar, tpvar2);
        failed = (tpvar != 42) || (tpvar2 != 84);
        fflush(stderr);
      }
    }
  } // implicit barrier: the task completes before `failed` is read below
  printf("***%s***\n", failed ? "FAILED" : "PASSED");
  return failed ? EXIT_FAILURE : EXIT_SUCCESS;
}
|
BPMaximumMatching.h | #ifndef BP_MAXIMUM_MATCHING_H
#define BP_MAXIMUM_MATCHING_H
#include "CombBLAS/CombBLAS.h"
#include <mpi.h>
#include <sys/time.h>
#include <iostream>
#include <functional>
#include <algorithm>
#include <vector>
#include <string>
#include <sstream>
#include "MatchingDefs.h"
// Wall-clock time (seconds) of the last maximumMatching() call.
// NOTE(review): a non-inline global definition in a header violates the ODR
// if this header is included from more than one translation unit — consider
// `inline double` (C++17) or moving the definition into a .cpp file.
double tTotalMaximum;
namespace combblas {
/**
* Create a boolean matrix A (not necessarily a permutation matrix)
* Input: ri: a dense vector (actual values in FullyDistVec should be IT)
* ncol: number of columns in the output matrix A
* Output: a boolean matrix A with m=size(ri) and n=ncol (input)
and A[k,ri[k]]=1
* This can be done by Matlab like constructor, no?
*/
// Build a distributed boolean matrix A with m = size(ri), n = ncol and
// A[k, ri[k]] = 1 for every entry of ri that lies in [0, ncol); negative
// entries (unmatched vertices, in the matching context) are skipped, so the
// result is not necessarily a permutation matrix.
//
// Fix over the previous version: the sendcnt/recvcnt/sdispls/rdispls
// buffers were leaked (only senddata, p_rows and p_cols were freed).
template <class IT, class DER>
SpParMat<IT, bool, DER> PermMat (const FullyDistVec<IT,IT> & ri, const IT ncol)
{
    IT procsPerRow = ri.commGrid->GetGridCols(); // the number of processor in a row of processor grid
    IT procsPerCol = ri.commGrid->GetGridRows(); // the number of processor in a column of processor grid

    IT global_nrow = ri.TotalLength();
    IT global_ncol = ncol;
    IT n_perproccol = global_ncol / procsPerCol;

    // The indices for FullyDistVec are offset'd to 1/p pieces
    // The matrix indices are offset'd to 1/sqrt(p) pieces
    // Add the corresponding offset before sending the data
    std::vector< std::vector<IT> > rowid(procsPerRow); // rowid in the local matrix of each vector entry
    std::vector< std::vector<IT> > colid(procsPerRow); // colid in the local matrix of each vector entry

    IT locvec = ri.arr.size();     // nnz in local vector
    IT roffset = ri.RowLenUntil(); // the number of vector elements in this processor row before the current processor
    for(typename std::vector<IT>::size_type i=0; i< (unsigned)locvec; ++i)
    {
        if(ri.arr[i]>=0 && ri.arr[i]<ncol) // this specialized for matching. TODO: make it general purpose by passing a function
        {
            IT rowrec = (n_perproccol!=0) ? std::min(ri.arr[i] / n_perproccol, procsPerRow-1) : (procsPerRow-1);
            // ri's numerical values give the colids and its local indices give rowids
            rowid[rowrec].push_back( i + roffset);
            colid[rowrec].push_back(ri.arr[i] - (rowrec * n_perproccol));
        }
    }

    int * sendcnt = new int[procsPerRow];
    int * recvcnt = new int[procsPerRow];
    for(IT i=0; i<procsPerRow; ++i)
    {
        sendcnt[i] = rowid[i].size();
    }
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, ri.commGrid->GetRowWorld()); // share the counts

    int * sdispls = new int[procsPerRow]();
    int * rdispls = new int[procsPerRow]();
    partial_sum(sendcnt, sendcnt+procsPerRow-1, sdispls+1);
    partial_sum(recvcnt, recvcnt+procsPerRow-1, rdispls+1);
    IT p_nnz = accumulate(recvcnt,recvcnt+procsPerRow, static_cast<IT>(0));

    IT * p_rows = new IT[p_nnz];
    IT * p_cols = new IT[p_nnz];
    IT * senddata = new IT[locvec];
    for(int i=0; i<procsPerRow; ++i)
    {
        copy(rowid[i].begin(), rowid[i].end(), senddata+sdispls[i]);
        std::vector<IT>().swap(rowid[i]); // clear memory of rowid
    }
    MPI_Alltoallv(senddata, sendcnt, sdispls, MPIType<IT>(), p_rows, recvcnt, rdispls, MPIType<IT>(), ri.commGrid->GetRowWorld());

    for(int i=0; i<procsPerRow; ++i)
    {
        copy(colid[i].begin(), colid[i].end(), senddata+sdispls[i]);
        std::vector<IT>().swap(colid[i]); // clear memory of colid
    }
    MPI_Alltoallv(senddata, sendcnt, sdispls, MPIType<IT>(), p_cols, recvcnt, rdispls, MPIType<IT>(), ri.commGrid->GetRowWorld());
    delete [] senddata;
    DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // fix: these were leaked

    std::tuple<IT,IT,bool> * p_tuples = new std::tuple<IT,IT,bool>[p_nnz];
    for(IT i=0; i< p_nnz; ++i)
    {
        p_tuples[i] = make_tuple(p_rows[i], p_cols[i], 1);
    }
    DeleteAll(p_rows, p_cols);

    // Now create the local matrix
    IT local_nrow = ri.MyRowLength();
    int my_proccol = ri.commGrid->GetRankInProcRow();
    // the last processor column absorbs the remainder of the columns
    IT local_ncol = (my_proccol<(procsPerCol-1))? (n_perproccol) : (global_ncol - (n_perproccol*(procsPerCol-1)));

    // infer the concrete type SpMat<IT,IT>
    typedef typename create_trait<DER, IT, bool>::T_inferred DER_IT;
    DER_IT * PSeq = new DER_IT();
    PSeq->Create( p_nnz, local_nrow, local_ncol, p_tuples); // deletion of tuples[] is handled by SpMat::Create

    SpParMat<IT,bool,DER_IT> P (PSeq, ri.commGrid);
    //Par_DCSC_Bool P (PSeq, ri.commGrid);
    return P;
}
/***************************************************************************
// Augment a matching by a set of vertex-disjoint augmenting paths.
// The paths are explored level-by-level similar to the level-synchronous BFS
// This approach is more efficient when we have many short augmenting paths
***************************************************************************/
// Flip the matching along all discovered augmenting paths simultaneously,
// level-by-level (like a level-synchronous BFS sweep): starting from the
// column endpoints recorded in `leaves` (leaf != -1), repeatedly step one
// (row, column) hop up each path via parentsRow and re-assign the mates.
template <typename IT>
void AugmentLevel(FullyDistVec<IT, IT>& mateRow2Col, FullyDistVec<IT, IT>& mateCol2Row, FullyDistVec<IT, IT>& parentsRow, FullyDistVec<IT, IT>& leaves)
{
    IT nrow = mateRow2Col.TotalLength();
    IT ncol = mateCol2Row.TotalLength();

    // columns where an augmenting path ended
    FullyDistSpVec<IT, IT> col(leaves, [](IT leaf){return leaf!=-1;});
    FullyDistSpVec<IT, IT> row(mateRow2Col.getcommgrid(), nrow);
    FullyDistSpVec<IT, IT> nextcol(col.getcommgrid(), ncol);
    while(col.getnnz()!=0)
    {
        row = col.Invert(nrow);
        // replace each row's value by its parent column on the path
        row = EWiseApply<IT>(row, parentsRow,
                             [](IT root, IT parent){return parent;},
                             [](IT root, IT parent){return true;},
                             false, (IT)-1);
        col = row.Invert(ncol); // children array
        // follow the old matching edge to continue each path (if any)
        nextcol = EWiseApply<IT>(col, mateCol2Row,
                                 [](IT child, IT mate){return mate;},
                                 [](IT child, IT mate){return mate!=-1;},
                                 false, (IT)-1);
        // commit the flipped matching edges for this level
        mateRow2Col.Set(row);
        mateCol2Row.Set(col);
        col = nextcol;
    }
}
/***************************************************************************
// Augment a matching by a set of vertex-disjoint augmenting paths.
// An MPI processor is responsible for a complete path.
// This approach is more efficient when we have few long augmenting paths
// We used one-sided MPI. Any PGAS language should be fine as well.
// This function is not thread safe, hence multithreading is not used here
***************************************************************************/
// Flip the matching along augmenting paths, one complete path per MPI
// process, using one-sided MPI (passive-target RMA): for each local leaf
// row, walk up the path via parentsRow, swapping matching edges with remote
// MPI_Get / MPI_Fetch_and_op / MPI_Put operations. Not thread safe.
template <typename IT>
void AugmentPath(FullyDistVec<IT, IT>& mateRow2Col, FullyDistVec<IT, IT>& mateCol2Row,FullyDistVec<IT, IT>& parentsRow, FullyDistVec<IT, IT>& leaves)
{
    MPI_Win win_mateRow2Col, win_mateCol2Row, win_parentsRow;
    MPI_Win_create((IT*)mateRow2Col.GetLocArr(), mateRow2Col.LocArrSize() * sizeof(IT), sizeof(IT), MPI_INFO_NULL, mateRow2Col.commGrid->GetWorld(), &win_mateRow2Col);
    MPI_Win_create((IT*)mateCol2Row.GetLocArr(), mateCol2Row.LocArrSize() * sizeof(IT), sizeof(IT), MPI_INFO_NULL, mateCol2Row.commGrid->GetWorld(), &win_mateCol2Row);
    MPI_Win_create((IT*)parentsRow.GetLocArr(), parentsRow.LocArrSize() * sizeof(IT), sizeof(IT), MPI_INFO_NULL, parentsRow.commGrid->GetWorld(), &win_parentsRow);

    IT* leaves_ptr = (IT*) leaves.GetLocArr();
    //MPI_Win_fence(0, win_mateRow2Col);
    //MPI_Win_fence(0, win_mateCol2Row);
    //MPI_Win_fence(0, win_parentsRow);

    IT row, col=100, nextrow;
    int owner_row, owner_col;
    IT locind_row, locind_col;
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    for(IT i=0; i<leaves.LocArrSize(); i++)
    {
        int depth=0; // currently unused (debugging aid)
        row = *(leaves_ptr+i);
        // walk the alternating path until reaching the unmatched root (-1)
        while(row != - 1)
        {
            // col = parentsRow[row] (remote read)
            owner_row = mateRow2Col.Owner(row, locind_row);
            MPI_Win_lock(MPI_LOCK_SHARED, owner_row, 0, win_parentsRow);
            MPI_Get(&col, 1, MPIType<IT>(), owner_row, locind_row, 1, MPIType<IT>(), win_parentsRow);
            MPI_Win_unlock(owner_row, win_parentsRow);

            // nextrow = mateCol2Row[col]; mateCol2Row[col] = row (atomic swap)
            owner_col = mateCol2Row.Owner(col, locind_col);
            MPI_Win_lock(MPI_LOCK_SHARED, owner_col, 0, win_mateCol2Row);
            MPI_Fetch_and_op(&row, &nextrow, MPIType<IT>(), owner_col, locind_col, MPI_REPLACE, win_mateCol2Row);
            MPI_Win_unlock(owner_col, win_mateCol2Row);

            // mateRow2Col[row] = col (remote write)
            MPI_Win_lock(MPI_LOCK_SHARED, owner_row, 0, win_mateRow2Col);
            MPI_Put(&col, 1, MPIType<IT>(), owner_row, locind_row, 1, MPIType<IT>(), win_mateRow2Col);
            MPI_Win_unlock(owner_row, win_mateRow2Col); // we need this otherwise col might get overwritten before communication!
            row = nextrow;
        }
    }
    //MPI_Win_fence(0, win_mateRow2Col);
    //MPI_Win_fence(0, win_mateCol2Row);
    //MPI_Win_fence(0, win_parentsRow);
    MPI_Win_free(&win_mateRow2Col);
    MPI_Win_free(&win_mateCol2Row);
    MPI_Win_free(&win_parentsRow);
}
// Maximum cardinality matching
// Output: mateRow2Col and mateCol2Row
// Compute a maximum-cardinality matching of the bipartite graph A by
// repeated phases: a BFS-style alternating-path search (driven by SpMV)
// followed by augmentation. mateRow2Col / mateCol2Row must hold an initial
// matching on entry (-1 = unmatched) and contain the final matching on
// exit. Many short paths are augmented level-synchronously (AugmentLevel);
// few long paths with one-sided MPI (AugmentPath). Rank 0 prints per-phase
// timing statistics; tTotalMaximum receives the total wall-clock time.
template <typename IT, typename NT,typename DER>
void maximumMatching(SpParMat < IT, NT, DER > & A, FullyDistVec<IT, IT>& mateRow2Col,
                     FullyDistVec<IT, IT>& mateCol2Row, bool prune=true, bool randMM = false, bool maximizeWeight = false)
{
    typedef VertexTypeMM <IT> VertexType;

    int nthreads=1;
#ifdef THREADED
#pragma omp parallel
    {
        nthreads = omp_get_num_threads();
    }
#endif
    // pre-allocated sparse accumulator reused by every SpMV call
    PreAllocatedSPA<VertexType> SPA(A.seq(), nthreads*4);

    double tstart = MPI_Wtime();
    int nprocs, myrank;
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&myrank);

    IT nrow = A.getnrow();
    IT ncol = A.getncol();
    FullyDistSpVec<IT, VertexType> fringeRow(A.getcommgrid(), nrow);
    FullyDistSpVec<IT, IT> umFringeRow(A.getcommgrid(), nrow);
    FullyDistVec<IT, IT> leaves ( A.getcommgrid(), ncol, (IT) -1);

    std::vector<std::vector<double> > timing;
    std::vector<int> layers;
    std::vector<int64_t> phaseMatched;
    double t1, time_search, time_augment, time_phase;

    bool matched = true;
    int phase = 0;
    int totalLayer = 0;
    IT numUnmatchedCol;

    MPI_Win winLeaves;
    MPI_Win_create((IT*)leaves.GetLocArr(), leaves.LocArrSize() * sizeof(IT), sizeof(IT), MPI_INFO_NULL, A.getcommgrid()->GetWorld(), &winLeaves);

    // one phase = one BFS search + one augmentation round
    while(matched)
    {
        time_phase = MPI_Wtime();
        std::vector<double> phase_timing(8,0);
        leaves.Apply ( [](IT val){return (IT) -1;});
        FullyDistVec<IT, IT> parentsRow ( A.getcommgrid(), nrow, (IT) -1);
        FullyDistSpVec<IT, VertexType> fringeCol(A.getcommgrid(), ncol);
        // start the BFS from all currently unmatched columns
        fringeCol = EWiseApply<VertexType>(fringeCol, mateCol2Row,
                                           [](VertexType vtx, IT mate){return vtx;},
                                           [](VertexType vtx, IT mate){return mate==-1;},
                                           true, VertexType());

        if(randMM) //select rand
        {
            fringeCol.ApplyInd([](VertexType vtx, IT idx){return VertexType(idx,idx,GlobalMT.rand());});
        }
        else
        {
            fringeCol.ApplyInd([](VertexType vtx, IT idx){return VertexType(idx,idx);});
        }
        ++phase;
        numUnmatchedCol = fringeCol.getnnz();
        int layer = 0;

        time_search = MPI_Wtime();
        // BFS layers: expand the frontier until no new rows are reached
        while(fringeCol.getnnz() > 0)
        {
            layer++;
            t1 = MPI_Wtime();
            //TODO: think about this semiring
            if(maximizeWeight)
                SpMV<WeightMaxMMSR<NT, VertexType>>(A, fringeCol, fringeRow, false, SPA);
            else
                SpMV<Select2ndMinSR<NT, VertexType>>(A, fringeCol, fringeRow, false, SPA);
            phase_timing[0] += MPI_Wtime()-t1;

            // remove vertices already having parents
            t1 = MPI_Wtime();
            fringeRow = EWiseApply<VertexType>(fringeRow, parentsRow,
                                               [](VertexType vtx, IT parent){return vtx;},
                                               [](VertexType vtx, IT parent){return parent==-1;},
                                               false, VertexType());

            // Set parent pointer
            parentsRow.EWiseApply(fringeRow,
                                  [](IT dval, VertexType svtx){return svtx.parent;},
                                  [](IT dval, VertexType svtx){return true;},
                                  false, VertexType());

            // unmatched rows reached this layer: each ends an augmenting path
            umFringeRow = EWiseApply<IT>(fringeRow, mateRow2Col,
                                         [](VertexType vtx, IT mate){return vtx.root;},
                                         [](VertexType vtx, IT mate){return mate==-1;},
                                         false, VertexType());

            phase_timing[1] += MPI_Wtime()-t1;

            IT nnz_umFringeRow = umFringeRow.getnnz(); // careful about this timing

            t1 = MPI_Wtime();
            if(nnz_umFringeRow >0)
            {
                /*
                 if(nnz_umFringeRow < 25*nprocs)
                 {
                 leaves.GSet(umFringeRow,
                 [](IT valRoot, IT idxLeaf){return valRoot;},
                 [](IT valRoot, IT idxLeaf){return idxLeaf;},
                 winLeaves);
                 // There might be a bug here. It does not return the same output for different number of processes
                 // e.g., check with g7jac200sc.mtx matrix
                 }
                 else*/
                {
                    // record leaves[root] = endpoint row for every finished path
                    FullyDistSpVec<IT, IT> temp1(A.getcommgrid(), ncol);
                    temp1 = umFringeRow.Invert(ncol);
                    leaves.Set(temp1);
                }
            }
            phase_timing[2] += MPI_Wtime()-t1;

            // matched row vertices in the the fringe
            fringeRow = EWiseApply<VertexType>(fringeRow, mateRow2Col,
                                               [](VertexType vtx, IT mate){return VertexType(mate, vtx.root);},
                                               [](VertexType vtx, IT mate){return mate!=-1;},
                                               false, VertexType());

            t1 = MPI_Wtime();
            // drop rows whose BFS tree root already found an augmenting path
            if(nnz_umFringeRow>0 && prune)
            {
                fringeRow.FilterByVal (umFringeRow,[](VertexType vtx){return vtx.root;}, false);
            }
            double tprune = MPI_Wtime()-t1;
            phase_timing[3] += tprune;

            // Go to matched column from matched row in the fringe. parent is automatically set to itself.
            t1 = MPI_Wtime();
            fringeCol = fringeRow.Invert(ncol,
                                         [](VertexType& vtx, const IT & index){return vtx.parent;},
                                         [](VertexType& vtx, const IT & index){return vtx;},
                                         [](VertexType& vtx1, VertexType& vtx2){return vtx1;});
            phase_timing[4] += MPI_Wtime()-t1;
        }
        time_search = MPI_Wtime() - time_search;
        phase_timing[5] += time_search;

        IT numMatchedCol = leaves.Count([](IT leaf){return leaf!=-1;});
        phaseMatched.push_back(numMatchedCol);
        time_augment = MPI_Wtime();
        if (numMatchedCol== 0) matched = false; // no augmenting path: matching is maximum
        else
        {
            // heuristic: few paths => per-path RMA augmentation is cheaper
            if(numMatchedCol < (2* nprocs * nprocs))
                AugmentPath(mateRow2Col, mateCol2Row,parentsRow, leaves);
            else
                AugmentLevel(mateRow2Col, mateCol2Row,parentsRow, leaves);
        }
        time_augment = MPI_Wtime() - time_augment;
        phase_timing[6] += time_augment;

        time_phase = MPI_Wtime() - time_phase;
        phase_timing[7] += time_phase;
        timing.push_back(phase_timing);
        totalLayer += layer;
        layers.push_back(layer);
    }

    MPI_Win_free(&winLeaves);
    tTotalMaximum = MPI_Wtime() - tstart;

    //isMaximalmatching(A, mateRow2Col, mateCol2Row, unmatchedRow, unmatchedCol);
    //isMatching(mateCol2Row, mateRow2Col); //todo there is a better way to check this

    // print statistics
    double combTime;
    if(myrank == 0)
    {
        std::cout << "****** maximum matching runtime ********\n";
        std::cout << std::endl;
        std::cout << "========================================================================\n";
        std::cout << " BFS Search \n";
        std::cout << "===================== ==================================================\n";
        std::cout << "Phase Layer Match SpMV EWOpp CmUqL Prun CmMC BFS Aug Total\n";
        std::cout << "===================== ===================================================\n";

        std::vector<double> totalTimes(timing[0].size(),0);
        int nphases = timing.size();
        for(int i=0; i<timing.size(); i++)
        {
            // NOTE(review): %lld assumes phaseMatched's int64_t matches long long
            printf(" %3d %3d %8lld ", i+1, layers[i], phaseMatched[i]);
            for(int j=0; j<timing[i].size(); j++)
            {
                totalTimes[j] += timing[i][j];
                //timing[i][j] /= timing[i].back();
                printf("%.2lf ", timing[i][j]);
            }
            printf("\n");
        }

        std::cout << "-----------------------------------------------------------------------\n";
        std::cout << "Phase Layer UnMat SpMV EWOpp CmUqL Prun CmMC BFS Aug Total \n";
        std::cout << "-----------------------------------------------------------------------\n";

        combTime = totalTimes.back();
        printf(" %3d %3d %8lld ", nphases, totalLayer/nphases, numUnmatchedCol);
        for(int j=0; j<totalTimes.size()-1; j++)
        {
            printf("%.2lf ", totalTimes[j]);
        }
        printf("%.2lf\n", combTime);
    }

    IT nrows=A.getnrow();
    IT matchedRow = mateRow2Col.Count([](IT mate){return mate!=-1;});
    if(myrank==0)
    {
        std::cout << "***Final Maximum Matching***\n";
        std::cout << "***Total-Rows Matched-Rows Total Time***\n";
        printf("%lld %lld %lf \n",nrows, matchedRow, combTime);
        printf("matched rows: %lld , which is: %lf percent \n",matchedRow, 100*(double)matchedRow/(nrows));
        std::cout << "-------------------------------------------------------\n\n";
    }
}
}
#endif
|
ADR_assembler_C_omp.c | /* This file is part of redbKIT.
* Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne (EPFL)
* Author: Federico Negri <federico.negri@epfl.ch>
*/
#include "mex.h"
#include <stdio.h>
#include <math.h>
#include "blas.h"
#include <string.h>
#define INVJAC(i,j,k) invjac[i+(j+k*dim)*noe]
#define GRADREFPHI(i,j,k) gradrefphi[i+(j+k*NumQuadPoints)*nln]
#ifdef _OPENMP
#include <omp.h>
#else
#warning "OpenMP not enabled. Compile with mex ADR_assembler_C_omp.c CFLAGS="\$CFLAGS -fopenmp" LDFLAGS="\$LDFLAGS -fopenmp""
#endif
/* MEX entry point: assembles the element-local matrices/vectors of an
 * advection-diffusion-reaction (ADR) finite-element problem.
 *
 * Inputs (prhs): 0 dim, 1 operator string, 2 diffusion-tensor selector,
 * 3 transport selector, 4 elements (connectivity), 5 nln (local dofs),
 * 6 mu, 7 conv_field, 8 si, 9 f, 10 quadrature weights w,
 * 11 invjac, 12 detjac, 13 phi, 14 gradrefphi.
 * Outputs (plhs): 0/1 COO row/col indices, 2 stiffness coefficients,
 * 3 mass coefficients, 4 rhs row indices, 5 rhs coefficients.
 */
void mexFunction(int nlhs,mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
/* Check for proper number of arguments. */
if(nrhs!=15) {
mexErrMsgTxt("15 inputs are required.");
} else if(nlhs>6) {
mexErrMsgTxt("Too many output arguments.");
}
/* Problem sizes: spatial dimension, number of elements, local dofs per element. */
double* dim_ptr = mxGetPr(prhs[0]);
int dim = (int)(dim_ptr[0]);
int noe = mxGetN(prhs[4]);
double* nln_ptr = mxGetPr(prhs[5]);
int nln = (int)(nln_ptr[0]);
int numRowsElements = mxGetM(prhs[4]);
int nln2 = nln*nln;
/* Allocate outputs: one COO triplet block of nln^2 entries per element,
 * one rhs block of nln entries per element. */
plhs[0] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[1] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[2] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[3] = mxCreateDoubleMatrix(nln2*noe,1, mxREAL);
plhs[4] = mxCreateDoubleMatrix(nln*noe,1, mxREAL);
plhs[5] = mxCreateDoubleMatrix(nln*noe,1, mxREAL);
double* myArows = mxGetPr(plhs[0]);
double* myAcols = mxGetPr(plhs[1]);
double* myAcoef = mxGetPr(plhs[2]);
double* myMcoef = mxGetPr(plhs[3]);
double* myRrows = mxGetPr(plhs[4]);
double* myRcoef = mxGetPr(plhs[5]);
/* Copy the operator-selection string from prhs[1] into a C string. */
char *OP_string = mxArrayToString(prhs[1]);
/* OP flags which terms are assembled: [diffusion, transport, reaction, source]. */
int OP[4] = {0, 0, 0, 0};
if (strcmp(OP_string, "diffusion")==0)
{
OP[0] = 1;
}
if (strcmp(OP_string, "transport")==0)
{
OP[1] = 1;
}
if (strcmp(OP_string, "reaction")==0)
{
OP[2] = 1;
}
if (strcmp(OP_string, "source")==0)
{
OP[3] = 1;
}
if (strcmp(OP_string, "all")==0)
{
OP[0] = 1;
OP[1] = 1;
OP[2] = 1;
OP[3] = 1;
}
mxFree(OP_string);
/* Selector masks: C_d picks diffusion tensor components, C_t transport components.
 * The sentinel value 10 means "all components" (identity mask). */
double C_t[dim];
double C_d[dim][dim];
double* TC_d = mxGetPr(prhs[2]);
double* TC_t = mxGetPr(prhs[3]);
int k,l;
for (k = 0; k < dim; k = k + 1 )
{
for (l = 0; l < dim; l = l + 1 )
{
C_d[k][l] = 0;
}
C_t[k] = 0;
}
if ((int)(TC_d[0])==10 && (int)(TC_d[1])==10)
{
for (l = 0; l < dim; l = l + 1 )
{
C_d[l][l] = 1;
}
}
else
{
/* Inputs are 1-based MATLAB indices; shift to 0-based. */
C_d[(int)(TC_d[0]-1)][(int)(TC_d[1]-1)] = 1;
}
if ((int)(TC_t[0])==10)
{
for (l = 0; l < dim; l = l + 1 )
{
C_t[l] = 1;
}
}
else
{
C_t[(int)(TC_t[0]-1)] = 1;
}
/* Local mass matrix (computed only once) with quadrature nodes */
double LocalMass[nln][nln];
int q;
int NumQuadPoints = mxGetN(prhs[10]);
double* mu = mxGetPr(prhs[6]);
double* conv_field = mxGetPr(prhs[7]);
double* si = mxGetPr(prhs[8]);
double* f = mxGetPr(prhs[9]);
double* w = mxGetPr(prhs[10]);
double* invjac = mxGetPr(prhs[11]);
double* detjac = mxGetPr(prhs[12]);
double* phi = mxGetPr(prhs[13]);
double* gradrefphi = mxGetPr(prhs[14]);
for (k = 0; k < nln; k = k + 1 )
{
for (l = 0; l < nln; l = l + 1 )
{
double tmp = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
tmp = tmp + phi[k+q*nln] * phi[l+q*nln] * w[q];
}
LocalMass[k][l] = tmp;
}
}
/* gradphi is the per-element private scratch holding physical-space gradients. */
double gradphi[dim][nln][NumQuadPoints];
double* elements = mxGetPr(prhs[4]);
/* Assembly: loop over the elements */
int ie;
#pragma omp parallel for shared(invjac,mu,conv_field,si,f,detjac,elements, myRrows, myRcoef,myAcols, myArows, myAcoef, myMcoef) private(gradphi,ie,k,l,q) firstprivate(phi,gradrefphi, w, numRowsElements, nln2, nln, OP, C_t, C_d, LocalMass)
for (ie = 0; ie < noe; ie = ie + 1 )
{
int d1, d2;
/* Map reference-element basis gradients to physical space via inverse Jacobian. */
for (k = 0; k < nln; k = k + 1 )
{
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
gradphi[d1][k][q] = 0;
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
}
}
}
}
int iii = 0;
int ii = 0;
int a, b;
/* a = test function index, b = trial function index */
for (a = 0; a < nln; a = a + 1 )
{
for (b = 0; b < nln; b = b + 1 )
{
double aloc = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
double diffusion = 0;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
diffusion = diffusion + C_d[d1][d2] * mu[ie+q*noe] * gradphi[d1][b][q] * gradphi[d2][a][q];
}
}
double transport = 0;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
transport = transport + C_t[d1] * conv_field[ie+(q+d1*NumQuadPoints)*noe] * gradphi[d1][b][q] * phi[a+q*nln];
}
double reaction = si[ie+q*noe] * phi[b+q*nln] * phi[a+q*nln];
aloc = aloc + (OP[0] * diffusion + OP[1] * transport + OP[2] * reaction) * w[q];
}
/* Global dof indices come straight from the connectivity array (MATLAB 1-based). */
myArows[ie*nln2+iii] = elements[a+ie*numRowsElements];
myAcols[ie*nln2+iii] = elements[b+ie*numRowsElements];
myAcoef[ie*nln2+iii] = aloc*detjac[ie];
myMcoef[ie*nln2+iii] = LocalMass[a][b]*detjac[ie];
iii = iii + 1;
}
/* Right-hand side (source) contribution for test function a. */
double floc = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
floc = floc + ( OP[3] * phi[a+q*nln] * f[ie+q*noe] ) * w[q];
}
myRrows[ie*nln+ii] = elements[a+ie*numRowsElements];
myRcoef[ie*nln+ii] = floc*detjac[ie];
ii = ii + 1;
}
}
}
|
Multidiagonal_impl.h | #pragma once
#include <Benchmarks/SpMV/ReferenceFormats/Legacy/Multidiagonal.h>
#include <TNL/Containers/Vector.h>
#include <TNL/Math.h>
#include <TNL/Exceptions/NotImplementedError.h>
namespace TNL {
namespace Benchmarks {
namespace SpMV {
namespace ReferenceFormats {
namespace Legacy {
template< typename Device >
class MultidiagonalDeviceDependentCode;
// Default constructor: relies on the members' own default construction.
// FIX: dropped the stray semicolon after the function body (an empty
// declaration, harmless but triggers -Wextra-semi warnings).
template< typename Real,
typename Device,
typename Index >
Multidiagonal< Real, Device, Index > :: Multidiagonal()
{
}
// Returns the serialization-type string identifying this matrix format
// together with its Real/Device/Index template arguments.
template< typename Real,
typename Device,
typename Index >
std::string Multidiagonal< Real, Device, Index >::getSerializationType()
{
return "Matrices::Multidiagonal< " +
getType< Real >() +
", " +
Device :: getDeviceType() +
", " +
TNL::getType< Index >() +
" >";
}
// Virtual dispatch wrapper around the static getSerializationType().
template< typename Real,
typename Device,
typename Index >
std::string Multidiagonal< Real, Device, Index >::getSerializationTypeVirtual() const
{
return this->getSerializationType();
}
// Sets the matrix dimensions. If diagonals have already been chosen via
// setDiagonals(), the value storage is (re)allocated and zeroed here;
// otherwise allocation is deferred until setDiagonals() is called.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index >::setDimensions( const IndexType rows,
const IndexType columns )
{
TNL_ASSERT( rows > 0 && columns > 0,
std::cerr << "rows = " << rows
<< " columns = " << columns << std::endl );
Matrix< Real, Device, Index >::setDimensions( rows, columns );
if( this->diagonalsShift.getSize() != 0 )
{
// One value slot per diagonal per row of the shorter dimension.
this->values.setSize( min( this->rows, this->columns ) * this->diagonalsShift.getSize() );
this->values.setValue( 0.0 );
}
}
// Intentionally a no-op: row capacities are fully determined by the
// diagonal shifts, so per-row lengths are ignored.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index >::setCompressedRowLengths( ConstRowsCapacitiesTypeView rowLengths )
{
/****
 * TODO: implement some check here similar to the one in the tridiagonal matrix
 */
}
// Alias of setCompressedRowLengths() kept for interface compatibility.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index >::setRowCapacities( ConstRowsCapacitiesTypeView rowLengths )
{
setCompressedRowLengths( rowLengths );
}
// Counts the diagonals that actually intersect the given row, i.e. whose
// shifted column index falls inside the matrix (host-safe, uses getElement).
template< typename Real,
typename Device,
typename Index >
Index Multidiagonal< Real, Device, Index >::getRowLength( const IndexType row ) const
{
IndexType rowLength( 0 );
for( IndexType i = 0; i < diagonalsShift.getSize(); i++ )
{
const IndexType column = row + diagonalsShift.getElement( i );
if( column >= 0 && column < this->getColumns() )
rowLength++;
}
return rowLength;
}
// Device-side variant of getRowLength(): identical logic but indexes
// diagonalsShift directly (no cross-device element fetch).
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
Index Multidiagonal< Real, Device, Index >::getRowLengthFast( const IndexType row ) const
{
IndexType rowLength( 0 );
for( IndexType i = 0; i < diagonalsShift.getSize(); i++ )
{
const IndexType column = row + diagonalsShift[ i ];
if( column >= 0 && column < this->getColumns() )
rowLength++;
}
return rowLength;
}
// Upper bound on any row's length: the total number of stored diagonals.
template< typename Real,
typename Device,
typename Index >
Index
Multidiagonal< Real, Device, Index >::
getMaxRowLength() const
{
return diagonalsShift.getSize();
}
// Sets the diagonal offsets (e.g. {-1, 0, 1} for tridiagonal). If the
// dimensions are already known, (re)allocates and zeroes the value storage.
template< typename Real,
typename Device,
typename Index >
template< typename Vector >
void Multidiagonal< Real, Device, Index > :: setDiagonals( const Vector& diagonals )
{
TNL_ASSERT( diagonals.getSize() > 0,
std::cerr << "New number of diagonals = " << diagonals.getSize() << std::endl );
this->diagonalsShift.setLike( diagonals );
this->diagonalsShift = diagonals;
if( this->rows != 0 && this->columns != 0 )
{
this->values.setSize( min( this->rows, this->columns ) * this->diagonalsShift.getSize() );
this->values.setValue( 0.0 );
}
}
// Read-only access to the vector of diagonal offsets.
template< typename Real,
typename Device,
typename Index >
const Containers::Vector< Index, Device, Index >& Multidiagonal< Real, Device, Index > :: getDiagonals() const
{
return this->diagonalsShift;
}
// Adopts the dimensions and diagonal layout (not the values) of another
// Multidiagonal matrix, possibly with different Real/Device/Index types.
template< typename Real,
typename Device,
typename Index >
template< typename Real2,
typename Device2,
typename Index2 >
void Multidiagonal< Real, Device, Index > :: setLike( const Multidiagonal< Real2, Device2, Index2 >& matrix )
{
this->setDimensions( matrix.getRows(), matrix.getColumns() );
setDiagonals( matrix.getDiagonals() );
}
// Total number of allocated (stored) elements, including explicit zeros.
template< typename Real,
typename Device,
typename Index >
Index Multidiagonal< Real, Device, Index > :: getNumberOfMatrixElements() const
{
return this->values.getSize();
}
// Counts stored elements that are non-zero. O(n) scan via getElement(),
// so it works regardless of the device the values live on.
template< typename Real,
typename Device,
typename Index >
Index Multidiagonal< Real, Device, Index > :: getNumberOfNonzeroMatrixElements() const
{
IndexType nonzeroElements = 0;
for( IndexType i = 0; i < this->values.getSize(); i++ )
if( this->values.getElement( i ) != 0 )
nonzeroElements++;
return nonzeroElements;
}
// Resets the matrix to an empty state and releases the value storage.
// NOTE(review): diagonalsShift is left untouched here — presumably so a
// subsequent setDimensions() can reuse it; confirm against callers.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index > :: reset()
{
this->rows = 0;
this->columns = 0;
this->values.reset();
}
// Element-wise equality: two matrices are equal when they have the same
// diagonal layout and the same stored values.
// FIX: the original compared `this->diagonals` / `matrix.diagonals`, a member
// that does not exist anywhere in this implementation (the offset vector is
// named `diagonalsShift`). Being a template, the typo only surfaces when the
// operator is actually instantiated.
template< typename Real,
typename Device,
typename Index >
template< typename Real2,
typename Device2,
typename Index2 >
bool Multidiagonal< Real, Device, Index >::operator == ( const Multidiagonal< Real2, Device2, Index2 >& matrix ) const
{
TNL_ASSERT( this->getRows() == matrix.getRows() &&
this->getColumns() == matrix.getColumns(),
std::cerr << "this->getRows() = " << this->getRows()
<< " matrix.getRows() = " << matrix.getRows()
<< " this->getColumns() = " << this->getColumns()
<< " matrix.getColumns() = " << matrix.getColumns() );
return ( this->diagonalsShift == matrix.diagonalsShift &&
this->values == matrix.values );
}
// Negation of operator==.
template< typename Real,
typename Device,
typename Index >
template< typename Real2,
typename Device2,
typename Index2 >
bool Multidiagonal< Real, Device, Index >::operator != ( const Multidiagonal< Real2, Device2, Index2 >& matrix ) const
{
return ! ( ( *this ) == matrix );
}
// Sets every stored element (on all diagonals) to the given value.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index >::setValue( const RealType& v )
{
this->values.setValue( v );
}
// Device-side element write. Returns false when (row, column) does not lie
// on any stored diagonal; the value is silently dropped in that case.
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
bool Multidiagonal< Real, Device, Index > :: setElementFast( const IndexType row,
const IndexType column,
const Real& value )
{
IndexType index;
if( ! this->getElementIndexFast( row, column, index ) )
return false;
this->values[ index ] = value;
return true;
}
// Host-safe element write (works across devices via values.setElement).
// Returns false when (row, column) does not lie on any stored diagonal.
template< typename Real,
typename Device,
typename Index >
bool Multidiagonal< Real, Device, Index > :: setElement( const IndexType row,
const IndexType column,
const Real& value )
{
IndexType index;
if( ! this->getElementIndex( row, column, index ) )
return false;
this->values.setElement( index, value );
return true;
}
// Device-side fused update: element = thisElementMultiplicator * element + value.
// Returns false when (row, column) is off every stored diagonal.
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
bool Multidiagonal< Real, Device, Index > :: addElementFast( const IndexType row,
const IndexType column,
const RealType& value,
const RealType& thisElementMultiplicator )
{
Index index;
if( ! this->getElementIndexFast( row, column, index ) )
return false;
RealType& aux = this->values[ index ];
aux = thisElementMultiplicator * aux + value;
return true;
}
// Host-safe fused update: element = thisElementMultiplicator * element + value.
// Returns false when (row, column) is off every stored diagonal.
template< typename Real,
typename Device,
typename Index >
bool Multidiagonal< Real, Device, Index > :: addElement( const IndexType row,
const IndexType column,
const RealType& value,
const RealType& thisElementMultiplicator )
{
Index index;
if( ! this->getElementIndex( row, column, index ) )
return false;
this->values.setElement( index, thisElementMultiplicator * this->values.getElement( index ) + value );
return true;
}
// Device-side row overwrite: delegates to addRowFast with a zero multiplier
// so existing values are discarded (mirrors setRow below).
// FIX: the original called addRowFast( row, columns, values, 0.0 ), dropping
// `numberOfElements` — the literal 0.0 was consumed as the element count, so
// the row was merely zeroed and the supplied values were never written.
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
bool Multidiagonal< Real, Device, Index > :: setRowFast( const IndexType row,
const IndexType* columns,
const RealType* values,
const IndexType numberOfElements )
{
return this->addRowFast( row, columns, values, numberOfElements, 0.0 );
}
// Host-safe row overwrite: delegates to addRow with a zero multiplier so the
// previous row contents are discarded.
template< typename Real,
typename Device,
typename Index >
bool Multidiagonal< Real, Device, Index > :: setRow( const IndexType row,
const Index* columns,
const Real* values,
const Index numberOfElements )
{
return this->addRow( row, columns, values, numberOfElements, 0.0 );
}
// Device-side row update. Writes the first `numberOfElements` values onto the
// row's diagonals in diagonal order (the `columns` argument is ignored — the
// layout is fixed by diagonalsShift), then zeroes the remaining diagonals.
// Fails when more values are supplied than there are diagonals.
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
bool Multidiagonal< Real, Device, Index > :: addRowFast( const IndexType row,
const IndexType* columns,
const RealType* values,
const IndexType numberOfElements,
const RealType& thisElementMultiplicator )
{
if( this->diagonalsShift.getSize() < numberOfElements )
return false;
typedef MultidiagonalDeviceDependentCode< Device > DDCType;
const IndexType elements = min( this->diagonalsShift.getSize(), numberOfElements );
IndexType i( 0 );
while( i < elements )
{
const IndexType index = DDCType::getElementIndex( this->getRows(), this->diagonalsShift.getSize(), row, i );
RealType& aux = this->values[ index ];
aux = thisElementMultiplicator * aux + values[ i ];
i++;
}
// Diagonals beyond the supplied values are cleared.
while( i < this->diagonalsShift.getSize() )
{
const IndexType index = DDCType::getElementIndex( this->getRows(), this->diagonalsShift.getSize(), row, i );
this->values[ index ] = 0;
i++;
}
return true;
}
// Host-safe counterpart of addRowFast: same diagonal-order semantics, using
// setElement/getElement so it works for values stored on any device. A zero
// multiplier takes the cheaper overwrite path (skips the read-back).
template< typename Real,
typename Device,
typename Index >
bool Multidiagonal< Real, Device, Index > :: addRow( const IndexType row,
const Index* columns,
const Real* values,
const Index numberOfElements,
const RealType& thisElementMultiplicator )
{
if( this->diagonalsShift.getSize() < numberOfElements )
return false;
typedef MultidiagonalDeviceDependentCode< Device > DDCType;
const IndexType elements = min( this->diagonalsShift.getSize(), numberOfElements );
IndexType i( 0 );
while( i < elements )
{
const IndexType index = DDCType::getElementIndex( this->getRows(), this->diagonalsShift.getSize(), row, i );
if( thisElementMultiplicator == 0.0 )
this->values.setElement( index, values[ i ] );
else
this->values.setElement( index, thisElementMultiplicator * this->values.getElement( index ) + values[ i ] );
i++;
}
// Diagonals beyond the supplied values are cleared.
while( i < this->diagonalsShift.getSize() )
{
const IndexType index = DDCType::getElementIndex( this->getRows(), this->diagonalsShift.getSize(), row, i );
this->values.setElement( index, 0 );
i++;
}
return true;
}
// Device-side element read; returns 0.0 for positions off every stored
// diagonal (the implicit-zero convention of sparse formats).
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
Real Multidiagonal< Real, Device, Index >::getElementFast( const IndexType row,
const IndexType column ) const
{
Index index;
if( ! this->getElementIndexFast( row, column, index ) )
return 0.0;
return this->values[ index ];
}
// Host-safe element read; returns 0.0 for positions off every stored diagonal.
template< typename Real,
typename Device,
typename Index >
Real Multidiagonal< Real, Device, Index >::getElement( const IndexType row,
const IndexType column ) const
{
Index index;
if( ! this->getElementIndex( row, column, index ) )
return 0.0;
return this->values.getElement( index );
}
// Copies the row's in-range columns and values into caller-provided arrays.
// The caller must size the arrays to at least getRowLengthFast( row ).
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
void Multidiagonal< Real, Device, Index >::getRowFast( const IndexType row,
IndexType* columns,
RealType* values ) const
{
IndexType pointer( 0 );
for( IndexType i = 0; i < diagonalsShift.getSize(); i++ )
{
const IndexType column = row + diagonalsShift[ i ];
if( column >= 0 && column < this->getColumns() )
{
columns[ pointer ] = column;
values[ pointer ] = this->getElementFast( row, column );
pointer++;
}
}
}
// Returns a mutable MatrixRow view over the row's in-range elements.
// The element stride differs per device: 1 on Host (row-major layout),
// `rows` on Cuda (diagonal-major layout) — matching the two
// MultidiagonalDeviceDependentCode::getElementIndex specializations below.
// NOTE(review): for any Device other than Host/Cuda control falls off the end
// without a return (UB); also the leading while loop assumes at least one
// diagonal intersects the row — confirm callers guarantee both.
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
typename Multidiagonal< Real, Device, Index >::MatrixRow
Multidiagonal< Real, Device, Index >::
getRow( const IndexType rowIndex )
{
// Skip diagonals whose column would be negative for this row.
IndexType firstRowElement( 0 );
while( rowIndex + this->diagonalsShift[ firstRowElement ] < 0 )
firstRowElement ++;
IndexType firstRowElementIndex;
this->getElementIndexFast( rowIndex, rowIndex + this->diagonalsShift[ firstRowElement ], firstRowElementIndex );
if( std::is_same< Device, Devices::Host >::value )
return MatrixRow( &this->values.getData()[ firstRowElementIndex ],
&this->diagonalsShift.getData()[ firstRowElement ],
this->diagonalsShift.getSize() - firstRowElement,
rowIndex,
this->getColumns(),
1 );
if( std::is_same< Device, Devices::Cuda >::value )
return MatrixRow( &this->values.getData()[ firstRowElementIndex ],
&this->diagonalsShift.getData()[ firstRowElement ],
this->diagonalsShift.getSize()- firstRowElement,
rowIndex,
this->getColumns(),
this->rows );
}
// Const overload of getRow(); identical layout logic to the mutable version.
// NOTE(review): shares the same caveats — no return for unknown devices, and
// the leading while loop assumes some diagonal intersects the row.
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
const typename Multidiagonal< Real, Device, Index >::MatrixRow
Multidiagonal< Real, Device, Index >::
getRow( const IndexType rowIndex ) const
{
IndexType firstRowElement( 0 );
while( rowIndex + this->diagonalsShift[ firstRowElement ] < 0 )
firstRowElement ++;
IndexType firstRowElementIndex;
this->getElementIndexFast( rowIndex, rowIndex + this->diagonalsShift[ firstRowElement ], firstRowElementIndex );
if( std::is_same< Device, Devices::Host >::value )
return MatrixRow( &this->values.getData()[ firstRowElementIndex ],
&this->diagonalsShift.getData()[ firstRowElement ],
this->diagonalsShift.getSize() - firstRowElement,
rowIndex,
this->getColumns(),
1 );
if( std::is_same< Device, Devices::Cuda >::value )
return MatrixRow( &this->values.getData()[ firstRowElementIndex ],
&this->diagonalsShift.getData()[ firstRowElement ],
this->diagonalsShift.getSize()- firstRowElement,
rowIndex,
this->getColumns(),
this->rows );
}
// Dot product of one matrix row with `vector`: sums value * vector[column]
// over the diagonals whose column lands inside the matrix.
template< typename Real,
typename Device,
typename Index >
template< typename Vector >
__cuda_callable__
typename Vector::RealType Multidiagonal< Real, Device, Index >::rowVectorProduct( const IndexType row,
const Vector& vector ) const
{
typedef MultidiagonalDeviceDependentCode< Device > DDCType;
Real result = 0.0;
for( Index i = 0;
i < this->diagonalsShift.getSize();
i ++ )
{
const Index column = row + this->diagonalsShift[ i ];
if( column >= 0 && column < this->getColumns() )
result += this->values[
DDCType::getElementIndex( this->getRows(),
this->diagonalsShift.getSize(),
row,
i ) ] * vector[ column ];
}
return result;
}
// Matrix-vector product outVector = A * inVector; dispatches to the
// device-dependent implementation (OpenMP loop on Host, kernel on Cuda).
template< typename Real,
typename Device,
typename Index >
template< typename InVector,
typename OutVector >
void Multidiagonal< Real, Device, Index >::vectorProduct( const InVector& inVector,
OutVector& outVector ) const
{
TNL_ASSERT( this->getColumns() == inVector.getSize(),
std::cerr << "Matrix columns: " << this->getColumns() << std::endl
<< "Vector size: " << inVector.getSize() << std::endl );
TNL_ASSERT( this->getRows() == outVector.getSize(),
std::cerr << "Matrix rows: " << this->getRows() << std::endl
<< "Vector size: " << outVector.getSize() << std::endl );
DeviceDependentCode::vectorProduct( *this, inVector, outVector );
}
// Matrix addition is not supported by this legacy format; always throws.
template< typename Real,
typename Device,
typename Index >
template< typename Real2,
typename Index2 >
void Multidiagonal< Real, Device, Index > :: addMatrix( const Multidiagonal< Real2, Device, Index2 >& matrix,
const RealType& matrixMultiplicator,
const RealType& thisMatrixMultiplicator )
{
throw Exceptions::NotImplementedError( "Multidiagonal::addMatrix is not implemented." );
}
// Builds this = matrixMultiplicator * matrix^T. The transpose's diagonal
// offsets are the negated offsets of `matrix`, reversed to stay sorted;
// elements are then copied one by one with rows/columns swapped.
template< typename Real,
typename Device,
typename Index >
template< typename Real2,
typename Index2 >
void Multidiagonal< Real, Device, Index >::getTransposition( const Multidiagonal< Real2, Device, Index2 >& matrix,
const RealType& matrixMultiplicator )
{
Containers::Vector< Index > auxDiagonals;
auxDiagonals.setLike( matrix.getDiagonals() );
const Index numberOfDiagonals = matrix.getDiagonals().getSize();
for( Index i = 0; i < numberOfDiagonals; i++ )
auxDiagonals[ i ] = -1.0 * matrix.getDiagonals().getElement( numberOfDiagonals - i - 1 );
this->setDimensions( matrix.getColumns(),
matrix.getRows() );
this->setDiagonals( auxDiagonals );
for( Index row = 0; row < matrix.getRows(); row++ )
for( Index diagonal = 0; diagonal < numberOfDiagonals; diagonal++ )
{
const Index column = row + matrix.getDiagonals().getElement( diagonal );
if( column >= 0 && column < matrix.getColumns() )
this->setElement( column, row, matrixMultiplicator * matrix.getElement( row, column ) );
}
}
// Performs one successive-over-relaxation (SOR) update of x[ row ] for the
// system A x = b: x[row] = (1-omega)*x[row] + omega/diag * (b[row] - sum),
// where `sum` is the off-diagonal part of the row times x. Returns false
// (and reports) when the diagonal entry is zero.
// FIX: corrected the typo "thge" -> "the" in the runtime error message.
template< typename Real,
typename Device,
typename Index >
template< typename Vector1, typename Vector2 >
bool Multidiagonal< Real, Device, Index > :: performSORIteration( const Vector1& b,
const IndexType row,
Vector2& x,
const RealType& omega ) const
{
TNL_ASSERT( row >=0 && row < this->getRows(),
std::cerr << "row = " << row
<< " this->getRows() = " << this->getRows() << std::endl );
RealType diagonalValue( 0.0 );
RealType sum( 0.0 );
const IndexType maxRowLength = this->getMaxRowLength();
for( IndexType i = 0; i < maxRowLength; i++ )
{
const IndexType column = row + this->diagonalsShift[ i ];
if( column >= 0 && column < this->getColumns() )
{
IndexType elementIndex;
this->getElementIndex( row, column, elementIndex );
if( column == row )
diagonalValue = this->values[ elementIndex ];
else
sum += this->values[ elementIndex ] * x[ column ];
}
}
if( diagonalValue == ( Real ) 0.0 )
{
std::cerr << "There is zero on the diagonal in " << row << "-th row of the matrix. I cannot perform SOR iteration." << std::endl;
return false;
}
x[ row ] = ( 1.0 - omega ) * x[ row ] + omega / diagonalValue * ( b[ row ] - sum );
return true;
}
// Copy assignment: adopts dimensions/diagonal layout via setLike, then
// copies the value and offset vectors. (The explicit member copies also
// overwrite what setLike already initialized — harmless duplication.)
template< typename Real,
typename Device,
typename Index >
Multidiagonal< Real, Device, Index >&
Multidiagonal< Real, Device, Index >::operator=( const Multidiagonal& matrix )
{
this->setLike( matrix );
this->values = matrix.values;
this->diagonalsShift = matrix.diagonalsShift;
return *this;
}
// Cross-device copy assignment: only validates the device types and copies
// the structural layout, then deliberately throws — transferring the values
// between devices is not implemented for this legacy format.
template< typename Real,
typename Device,
typename Index >
template< typename Real2, typename Device2, typename Index2, typename >
Multidiagonal< Real, Device, Index >&
Multidiagonal< Real, Device, Index >::operator=( const Multidiagonal< Real2, Device2, Index2 >& matrix )
{
static_assert( std::is_same< Device, Devices::Host >::value || std::is_same< Device, Devices::Cuda >::value,
"unknown device" );
static_assert( std::is_same< Device2, Devices::Host >::value || std::is_same< Device2, Devices::Cuda >::value,
"unknown device" );
this->setLike( matrix );
throw Exceptions::NotImplementedError("Cross-device assignment for the Multidiagonal format is not implemented yet.");
}
// Serializes base-class state, then values and diagonal offsets, to `file`.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index >::save( File& file ) const
{
Matrix< Real, Device, Index >::save( file );
file << this->values << this->diagonalsShift;
}
// Deserializes in the same order save() writes: base state, values, offsets.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index >::load( File& file )
{
Matrix< Real, Device, Index >::load( file );
file >> this->values >> this->diagonalsShift;
}
// Convenience overload: saves to a named file via the Object interface.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index >::save( const String& fileName ) const
{
Object::save( fileName );
}
// Convenience overload: loads from a named file via the Object interface.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index >::load( const String& fileName )
{
Object::load( fileName );
}
// Prints a human-readable row-by-row dump of the in-range elements.
template< typename Real,
typename Device,
typename Index >
void Multidiagonal< Real, Device, Index >::print( std::ostream& str ) const
{
for( IndexType row = 0; row < this->getRows(); row++ )
{
str <<"Row: " << row << " -> ";
for( IndexType i = 0; i < this->diagonalsShift.getSize(); i++ )
{
const IndexType column = row + diagonalsShift.getElement( i );
if( column >=0 && column < this->columns )
str << " Col:" << column << "->" << this->getElement( row, column ) << "\t";
}
str << std::endl;
}
}
// Host-safe lookup of the storage index for (row, column). Scans the diagonal
// offsets for one equal to column - row; on a hit, writes the device-layout
// index into `index` and returns true, otherwise returns false.
template< typename Real,
typename Device,
typename Index >
bool Multidiagonal< Real, Device, Index >::getElementIndex( const IndexType row,
const IndexType column,
Index& index ) const
{
TNL_ASSERT( row >=0 && row < this->rows,
std::cerr << "row = " << row
<< " this->rows = " << this->rows << std::endl );
TNL_ASSERT( column >=0 && column < this->columns,
std::cerr << "column = " << column
<< " this->columns = " << this->columns << std::endl );
typedef MultidiagonalDeviceDependentCode< Device > DDCType;
IndexType i( 0 );
while( i < this->diagonalsShift.getSize() )
{
if( diagonalsShift.getElement( i ) == column - row )
{
index = DDCType::getElementIndex( this->getRows(), this->diagonalsShift.getSize(), row, i );
return true;
}
i++;
}
return false;
}
// Device-side variant of getElementIndex(): identical scan, but indexes
// diagonalsShift directly instead of going through getElement().
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
bool Multidiagonal< Real, Device, Index >::getElementIndexFast( const IndexType row,
const IndexType column,
Index& index ) const
{
TNL_ASSERT( row >=0 && row < this->rows,
std::cerr << "row = " << row
<< " this->rows = " << this->rows << std::endl );
TNL_ASSERT( column >=0 && column < this->columns,
std::cerr << "column = " << column
<< " this->columns = " << this->columns << std::endl );
typedef MultidiagonalDeviceDependentCode< Device > DDCType;
IndexType i( 0 );
while( i < this->diagonalsShift.getSize() )
{
if( diagonalsShift[ i ] == column - row )
{
index = DDCType::getElementIndex( this->getRows(), this->diagonalsShift.getSize(), row, i );
return true;
}
i++;
}
return false;
}
// Host specialization: row-major value layout (all diagonals of one row are
// contiguous) and an OpenMP-parallel matrix-vector product.
template<>
class MultidiagonalDeviceDependentCode< Devices::Host >
{
public:
typedef Devices::Host Device;
// Row-major index: diagonals of a row are adjacent (stride 1 within a row).
template< typename Index >
__cuda_callable__
static Index getElementIndex( const Index rows,
const Index diagonals,
const Index row,
const Index diagonal )
{
return row*diagonals + diagonal;
}
// outVector = matrix * inVector, one row per loop iteration.
template< typename Real,
typename Index,
typename InVector,
typename OutVector >
static void vectorProduct( const Multidiagonal< Real, Device, Index >& matrix,
const InVector& inVector,
OutVector& outVector )
{
#ifdef HAVE_OPENMP
#pragma omp parallel for if( Devices::Host::isOMPEnabled() )
#endif
for( Index row = 0; row < matrix.getRows(); row ++ )
outVector[ row ] = matrix.rowVectorProduct( row, inVector );
}
};
// Cuda specialization: diagonal-major value layout (all rows of one diagonal
// are contiguous, giving coalesced accesses when threads map to rows) and a
// kernel-based matrix-vector product.
template<>
class MultidiagonalDeviceDependentCode< Devices::Cuda >
{
public:
typedef Devices::Cuda Device;
// Diagonal-major index: consecutive rows of a diagonal are adjacent.
template< typename Index >
__cuda_callable__
static Index getElementIndex( const Index rows,
const Index diagonals,
const Index row,
const Index diagonal )
{
return diagonal*rows + row;
}
template< typename Real,
typename Index,
typename InVector,
typename OutVector >
static void vectorProduct( const Multidiagonal< Real, Device, Index >& matrix,
const InVector& inVector,
OutVector& outVector )
{
MatrixVectorProductCuda( matrix, inVector, outVector );
}
};
} //namespace Legacy
} //namespace ReferenceFormats
} //namespace SpMV
} //namespace Benchmarks
} // namespace TNL
|
tree-vectorizer.h | /* Vectorizer
Copyright (C) 2003-2016 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H
#include "tree-data-ref.h"
#include "target.h"
/* Used for naming of new temporaries. */
enum vect_var_kind {
  vect_simple_var,      /* generic SSA temporary */
  vect_pointer_var,     /* pointer/address temporary */
  vect_scalar_var,      /* scalar result temporary */
  vect_mask_var         /* boolean/mask temporary */
};
/* Defines type of operation by its number of operands. */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};
/* Define type of available alignment support, ordered from no support
   for misaligned accesses up to a naturally aligned reference.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};
/* Define type of def-use cross-iteration cycle.  Classifies where a
   value used in the loop/bb is defined.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,        /* compile-time constant */
  vect_external_def,            /* defined outside the vectorized region */
  vect_internal_def,            /* ordinary def inside the region */
  vect_induction_def,           /* induction variable */
  vect_reduction_def,           /* reduction */
  vect_double_reduction_def,    /* reduction spanning two nested loops */
  vect_nested_cycle,            /* cycle in an inner loop of the nest */
  vect_unknown_def_type
};
/* Define type of reduction: plain tree-code reductions, conditional
   reductions, and the integer-induction form of conditional reduction.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION
};
#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \
|| ((D) == vect_double_reduction_def) \
|| ((D) == vect_nested_cycle))
/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model. */
struct stmt_info_for_cost {
  int count;                     /* how many instructions of this kind */
  enum vect_cost_for_stmt kind;  /* cost-model category */
  gimple *stmt;                  /* representative statement, may be NULL */
  int misalign;                  /* misalignment in bytes, 0 if aligned/unknown */
};
typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
/************************************************************************
SLP
************************************************************************/
typedef struct _slp_tree *slp_tree;
/* A computation tree of an SLP instance. Each node corresponds to a group of
   stmts to be packed in a SIMD stmt. */
struct _slp_tree {
  /* Nodes that contain def-stmts of this node statements operands. */
  vec<slp_tree> children;
  /* A group of scalar stmts to be vectorized together. */
  vec<gimple *> stmts;
  /* Load permutation relative to the stores, NULL if there is no
     permutation. */
  vec<unsigned> load_permutation;
  /* Vectorized stmt/s. */
  vec<gimple *> vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts. It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by vector size. */
  unsigned int vec_stmts_size;
  /* Whether the scalar computations use two different operators. */
  bool two_operators;
  /* The DEF type of this node. */
  enum vect_def_type def_type;
};
/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  Access it through the SLP_INSTANCE_* macros below. */
typedef struct _slp_instance {
  /* The root of SLP tree. */
  slp_tree root;
  /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */
  unsigned int group_size;
  /* The unrolling factor required to vectorized this SLP instance. */
  unsigned int unrolling_factor;
  /* The group of nodes that contain loads of this SLP instance. */
  vec<slp_tree> loads;
} *slp_instance;
/* Access Functions. */
#define SLP_INSTANCE_TREE(S) (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S) (S)->loads
#define SLP_TREE_CHILDREN(S) (S)->children
#define SLP_TREE_SCALAR_STMTS(S) (S)->stmts
#define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators
#define SLP_TREE_DEF_TYPE(S) (S)->def_type
/* This struct is used to store the information of a data reference,
   including the data ref itself, the access offset (calculated by summing its
   offset and init) and the segment length for aliasing checks.
   This is used to merge alias checks. */
struct dr_with_seg_len
{
  dr_with_seg_len (data_reference_p d, tree len)
    : dr (d),
      offset (size_binop (PLUS_EXPR, DR_OFFSET (d), DR_INIT (d))),
      seg_len (len) {}

  /* The data reference itself. */
  data_reference_p dr;
  /* DR_OFFSET (dr) + DR_INIT (dr), precomputed by the constructor. */
  tree offset;
  /* Length of the accessed segment, used by run-time alias checks. */
  tree seg_len;
};
/* This struct contains two dr_with_seg_len objects with aliasing data
   refs.  Two comparisons are generated from them. */
struct dr_with_seg_len_pair_t
{
  dr_with_seg_len_pair_t (const dr_with_seg_len& d1,
			  const dr_with_seg_len& d2)
    : first (d1), second (d2) {}

  /* The two potentially-aliasing references being checked against each
     other. */
  dr_with_seg_len first;
  dr_with_seg_len second;
};
/* Vectorizer state common between loop and basic-block vectorization. */
struct vec_info {
  /* Discriminator used by the is_a_helper/dyn_cast machinery below to
     distinguish _bb_vec_info from _loop_vec_info. */
  enum { bb, loop } kind;
  /* All SLP instances. */
  vec<slp_instance> slp_instances;
  /* All data references. */
  vec<data_reference_p> datarefs;
  /* All data dependences. */
  vec<ddr_p> ddrs;
  /* All interleaving chains of stores, represented by the first
     stmt in the chain. */
  vec<gimple *> grouped_stores;
  /* Cost data used by the target cost model. */
  void *target_cost_data;
};
struct _loop_vec_info;
struct _bb_vec_info;
/* dyn_cast/is_a support: a vec_info is a _loop_vec_info iff its kind
   discriminator says so. */
template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}
/* dyn_cast/is_a support: a vec_info is a _bb_vec_info iff its kind
   discriminator says so. */
template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}
/*-----------------------------------------------------------------*/
/* Info on vectorized loops. */
/*-----------------------------------------------------------------*/
/* Per-loop vectorization state; extends vec_info with loop-only fields. */
typedef struct _loop_vec_info : public vec_info {
  /* The loop to which this info struct refers to. */
  struct loop *loop;
  /* The loop basic blocks. */
  basic_block *bbs;
  /* Number of latch executions. */
  tree num_itersm1;
  /* Number of iterations. */
  tree num_iters;
  /* Number of iterations of the original loop. */
  tree num_iters_unchanged;
  /* Threshold of number of iterations below which vectorization will not be
     performed.  It is calculated from MIN_PROFITABLE_ITERS and
     PARAM_MIN_VECT_LOOP_BOUND. */
  unsigned int th;
  /* Is the loop vectorizable? */
  bool vectorizable;
  /* Vectorization factor (see LOOP_VINFO_VECT_FACTOR). */
  int vectorization_factor;
  /* Unknown DRs according to which loop was peeled. */
  struct data_reference *unaligned_dr;
  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
        If X=0: Peeling for alignment will not be applied.
        If X>0: Peel first X iterations.
        If X=-1: Generate a runtime test to calculate the number of iterations
                 to be peeled, using the dataref recorded in the field
                 unaligned_dr. */
  int peeling_for_alignment;
  /* The mask used to check the alignment of pointers or arrays. */
  int ptr_mask;
  /* The loop nest in which the data dependences are computed. */
  vec<loop_p> loop_nest;
  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check. */
  vec<ddr_p> may_alias_ddrs;
  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built. */
  vec<dr_with_seg_len_pair_t> comp_alias_ddrs;
  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check. */
  vec<gimple *> may_misalign_stmts;
  /* The unrolling factor needed to SLP the loop. In case of that pure SLP is
     applied to the loop, i.e., no unrolling is needed, this is 1. */
  unsigned slp_unrolling_factor;
  /* Reduction cycles detected in the loop. Used in loop-aware SLP. */
  vec<gimple *> reductions;
  /* All reduction chains in the loop, represented by the first
     stmt in the chain. */
  vec<gimple *> reduction_chains;
  /* Cost vector for a single scalar iteration. */
  vec<stmt_info_for_cost> scalar_cost_vec;
  /* Cost of a single scalar iteration. */
  int single_scalar_iteration_cost;
  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this. */
  bool peeling_for_gaps;
  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop. */
  bool peeling_for_niter;
  /* Reductions are canonicalized so that the last operand is the reduction
     operand.  If this places a constant into RHS1, this decanonicalizes
     GIMPLE for other phases, so we must track when this has occurred and
     fix it up. */
  bool operands_swapped;
  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:
        #pragma omp simd
        for (int i = 0; i < m; i++)
          a[i] = a[i + k] * c;
     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false. */
  bool no_data_dependencies;
  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion. */
  struct loop *scalar_loop;
  /* Mark loops having masked stores. */
  bool has_mask_store;
} *loop_vec_info;
/* Access Functions. */
#define LOOP_VINFO_LOOP(L) (L)->loop
#define LOOP_VINFO_BBS(L) (L)->bbs
#define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1
#define LOOP_VINFO_NITERS(L) (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
prologue peeling retain total unchanged scalar loop iterations for
cost model. */
#define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable
#define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor
#define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L) (L)->loop_nest
#define LOOP_VINFO_DATAREFS(L) (L)->datarefs
#define LOOP_VINFO_DDRS(L) (L)->ddrs
#define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs
#define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L) (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop
#define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \
((L)->may_alias_ddrs.length () > 0)
#define LOOP_VINFO_NITERS_KNOWN_P(L) \
(tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
/* Return the loop_vec_info recorded in LOOP's aux field.
   NOTE(review): assumes LOOP->aux, when non-NULL, always points to a
   loop_vec_info -- not verifiable from this header alone. */
static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}
static inline bool
nested_in_vect_loop_p (struct loop *loop, gimple *stmt)
{
return (loop->inner
&& (loop->inner == (gimple_bb (stmt))->loop_father));
}
/* Per-basic-block vectorization state; extends vec_info. */
typedef struct _bb_vec_info : public vec_info
{
  /* The basic block this info refers to. */
  basic_block bb;
  /* Boundaries of the stmt region considered for vectorization. */
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;
#define BB_VINFO_BB(B) (B)->bb
#define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances
#define BB_VINFO_DATAREFS(B) (B)->datarefs
#define BB_VINFO_DDRS(B) (B)->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data
/* Return the bb_vec_info recorded in BB's aux field.
   NOTE(review): assumes BB->aux, when non-NULL, always points to a
   bb_vec_info -- not verifiable from this header alone. */
static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}
/*-----------------------------------------------------------------*/
/* Info on vectorized defs. */
/*-----------------------------------------------------------------*/
/* Kind of vectorizable operation a stmt has been classified as; recorded
   per stmt via STMT_VINFO_TYPE. */
enum stmt_vec_info_type {
  /* Default value: the stmt has not (yet) been classified. */
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};
/* Indicates whether/how a variable is used in the scope of loop/basic
block. */
enum vect_relevant {
  /* The def is not used in the scope under consideration at all. */
  vect_unused_in_scope = 0,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt. */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction). */
  vect_used_in_outer,
  /* defs that feed computations that end up (only) in a reduction. These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example). We use this
     to identify computations that we can change the order in which they are
     computed. */
  vect_used_by_reduction,
  /* The def is used directly within the scope being vectorized. */
  vect_used_in_scope
};
/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.
   In the loop context the meanings of pure and hybrid SLP are slightly
   different. By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, cause we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from subsequent
   iterations by unrolling the loop by 2). */
enum slp_vect_type {
  /* Regular loop-based vectorization only. */
  loop_vect = 0,
  /* All uses of the stmt are inside SLP instances. */
  pure_slp,
  /* Part of an SLP instance but also has uses outside SLP sequences. */
  hybrid
};
typedef struct data_reference *dr_p;
/* Vectorizer bookkeeping attached to each stmt (looked up by uid via
   vinfo_for_stmt). */
typedef struct _stmt_vec_info {
  enum stmt_vec_info_type type;
  /* Indicates whether this stmt is part of a computation whose result is
     used outside the loop. */
  bool live;
  /* Stmt is part of some pattern (computation idiom). */
  bool in_pattern_p;
  /* The stmt to which this info struct refers to. */
  gimple *stmt;
  /* The vec_info with respect to which STMT is vectorized. */
  vec_info *vinfo;
  /* The vector type to be used for the LHS of this statement. */
  tree vectype;
  /* The vectorized version of the stmt. */
  gimple *vectorized_stmt;
  /** The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
     at most one such data-ref.  **/
  /* Information about the data-ref (access function, etc),
     relative to the inner-most containing loop. */
  struct data_reference *data_ref_info;
  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization). */
  tree dr_base_address;
  tree dr_init;
  tree dr_offset;
  tree dr_step;
  tree dr_aligned_to;
  /* For loop PHI nodes, the base and evolution part of it.  This makes sure
     this information is still available in vect_update_ivs_after_vectorizer
     where we may not be able to re-analyze the PHI nodes evolution as
     peeling for the prologue loop can make it unanalyzable.  The evolution
     part is still correct after peeling, but the base may have changed from
     the version here. */
  tree loop_phi_evolution_base_unchanged;
  tree loop_phi_evolution_part;
  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
        true): S is the "pattern stmt" that represents (and replaces) the
        sequence of stmts that constitutes the pattern.  Similarly, the
        related_stmt of the "pattern stmt" points back to this stmt (which is
        the last stmt in the original sequence of stmts that constitutes the
        pattern). */
  gimple *related_stmt;
  /* Used to keep a sequence of def stmts of a pattern stmt if such exists. */
  gimple_seq pattern_def_seq;
  /* List of datarefs that are known to have the same alignment as the dataref
     of this stmt. */
  vec<dr_p> same_align_refs;
  /* Selected SIMD clone's function info.  First vector element
     is SIMD clone's function decl, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments). */
  vec<tree> simd_clone_info;
  /* Classify the def of this stmt. */
  enum vect_def_type def_type;
  /* Whether the stmt is SLPed, loop-based vectorized, or both. */
  enum slp_vect_type slp_type;
  /* Interleaving and reduction chains info. */
  /* First element in the group. */
  gimple *first_element;
  /* Pointer to the next element in the group. */
  gimple *next_element;
  /* For data-refs, in case that two or more stmts share data-ref, this is the
     pointer to the previously detected stmt with the same dr. */
  gimple *same_dr_stmt;
  /* The size of the group. */
  unsigned int size;
  /* For stores, number of stores from this group seen. We vectorize the last
     one. */
  unsigned int store_count;
  /* For loads only, the gap from the previous load. For consecutive loads, GAP
     is 1. */
  unsigned int gap;
  /* The minimum negative dependence distance this stmt participates in
     or zero if none. */
  unsigned int min_neg_dist;
  /* Not all stmts in the loop need to be vectorized. e.g, the increment
     of the loop induction variable and computation of array indexes. relevant
     indicates whether the stmt needs to be vectorized. */
  enum vect_relevant relevant;
  /* Is this statement vectorizable or should it be skipped in (partial)
     vectorization. */
  bool vectorizable;
  /* For loads if this is a gather, for stores if this is a scatter. */
  bool gather_scatter_p;
  /* True if this is an access with loop-invariant stride. */
  bool strided_p;
  /* For both loads and stores. */
  bool simd_lane_access_p;
  /* For reduction loops, this is the type of reduction. */
  enum vect_reduction_type v_reduc_type;
  /* The number of scalar stmt references from active SLP instances. */
  unsigned int num_slp_uses;
} *stmt_vec_info;
/* Access Functions. */
#define STMT_VINFO_TYPE(S) (S)->type
#define STMT_VINFO_STMT(S) (S)->stmt
/* Return STMT_VINFO's owning vec_info as a loop_vec_info, or NULL when the
   stmt is being vectorized as part of a basic-block region instead. */
inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    return loop_vinfo;
  return NULL;
}
/* Return STMT_VINFO's owning vec_info as a bb_vec_info, or NULL when the
   stmt is being vectorized as part of a loop instead. */
inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    return bb_vinfo;
  return NULL;
}
#define STMT_VINFO_RELEVANT(S) (S)->relevant
#define STMT_VINFO_LIVE_P(S) (S)->live
#define STMT_VINFO_VECTYPE(S) (S)->vectype
#define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable
#define STMT_VINFO_DATA_REF(S) (S)->data_ref_info
#define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S) (S)->strided_p
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S) (S)->v_reduc_type
#define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_base_address
#define STMT_VINFO_DR_INIT(S) (S)->dr_init
#define STMT_VINFO_DR_OFFSET(S) (S)->dr_offset
#define STMT_VINFO_DR_STEP(S) (S)->dr_step
#define STMT_VINFO_DR_ALIGNED_TO(S) (S)->dr_aligned_to
#define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S) (S)->def_type
#define STMT_VINFO_GROUP_FIRST_ELEMENT(S) (S)->first_element
#define STMT_VINFO_GROUP_NEXT_ELEMENT(S) (S)->next_element
#define STMT_VINFO_GROUP_SIZE(S) (S)->size
#define STMT_VINFO_GROUP_STORE_COUNT(S) (S)->store_count
#define STMT_VINFO_GROUP_GAP(S) (S)->gap
#define STMT_VINFO_GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
#define STMT_VINFO_GROUPED_ACCESS(S) ((S)->first_element != NULL && (S)->data_ref_info)
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S) (S)->num_slp_uses
#define GROUP_FIRST_ELEMENT(S) (S)->first_element
#define GROUP_NEXT_ELEMENT(S) (S)->next_element
#define GROUP_SIZE(S) (S)->size
#define GROUP_STORE_COUNT(S) (S)->store_count
#define GROUP_GAP(S) (S)->gap
#define GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
#define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
#define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S) (S)->slp_type
/* Vectorizer-private data hung off a data_reference's aux field; accessed
   via DR_VECT_AUX and allocated lazily by set_dr_misalignment. */
struct dataref_aux {
  /* Known misalignment of the access, or -1 if unknown (see
     known_alignment_for_access_p). */
  int misalignment;
  /* If true the alignment of base_decl needs to be increased. */
  bool base_misaligned;
  /* If true we know the base is at least vector element alignment aligned. */
  bool base_element_aligned;
  tree base_decl;
};
#define DR_VECT_AUX(dr) ((dataref_aux *)(dr)->aux)
#define VECT_MAX_COST 1000
/* The maximum number of intermediate steps required in multi-step type
conversion. */
#define MAX_INTERM_CVT_STEPS 3
/* The maximum vectorization factor supported by any target (V64QI). */
#define MAX_VECTORIZATION_FACTOR 64
extern vec<stmt_vec_info> stmt_vec_info_vec;
void init_stmt_vec_info_vec (void);
void free_stmt_vec_info_vec (void);
/* Return the stmt_vec_info corresponding to STMT.  The stmt's uid is a
   1-based index into stmt_vec_info_vec; uid 0 means no info has been
   recorded for STMT, in which case NULL is returned. */
static inline stmt_vec_info
vinfo_for_stmt (gimple *stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    return NULL;
  return stmt_vec_info_vec[uid - 1];
}
/* Set vectorizer information INFO for STMT.  A stmt without a uid is given
   a fresh 1-based uid and INFO is appended to stmt_vec_info_vec; INFO must
   be non-NULL in that case.  A stmt that already has a uid may only have
   its info cleared, so INFO must be NULL then (the slot is kept, the uid
   is not recycled). */
static inline void
set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    {
      gcc_checking_assert (info);
      /* uids are 1-based so that 0 can mean "no info recorded". */
      uid = stmt_vec_info_vec.length () + 1;
      gimple_set_uid (stmt, uid);
      stmt_vec_info_vec.safe_push (info);
    }
  else
    {
      gcc_checking_assert (info == NULL);
      stmt_vec_info_vec[uid - 1] = info;
    }
}
/* Return the earlier of STMT1 and STMT2, judged by their stmt_vec_info
   uids (smaller uid was recorded first).  A NULL argument yields the
   other stmt; if either stmt has no uid recorded, return NULL. */
static inline gimple *
get_earlier_stmt (gimple *stmt1, gimple *stmt2)
{
  if (!stmt1)
    return stmt2;
  if (!stmt2)
    return stmt1;

  unsigned int uid1 = gimple_uid (stmt1);
  unsigned int uid2 = gimple_uid (stmt2);
  if (uid1 == 0 || uid2 == 0)
    return NULL;

  gcc_checking_assert (uid1 <= stmt_vec_info_vec.length ()
		       && uid2 <= stmt_vec_info_vec.length ());
  return uid1 < uid2 ? stmt1 : stmt2;
}
/* Return the later of STMT1 and STMT2, judged by their stmt_vec_info
   uids (larger uid was recorded later).  A NULL argument yields the
   other stmt; if either stmt has no uid recorded, return NULL. */
static inline gimple *
get_later_stmt (gimple *stmt1, gimple *stmt2)
{
  if (!stmt1)
    return stmt2;
  if (!stmt2)
    return stmt1;

  unsigned int uid1 = gimple_uid (stmt1);
  unsigned int uid2 = gimple_uid (stmt2);
  if (uid1 == 0 || uid2 == 0)
    return NULL;

  gcc_assert (uid1 <= stmt_vec_info_vec.length ());
  gcc_assert (uid2 <= stmt_vec_info_vec.length ());
  return uid1 > uid2 ? stmt1 : stmt2;
}
/* Return TRUE if the statement represented by STMT_INFO is part of a
   pattern: its related stmt exists, has a stmt_vec_info, and that info
   is flagged as being in a pattern. */
static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  gimple *orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (orig_stmt == NULL)
    return false;

  stmt_vec_info orig_info = vinfo_for_stmt (orig_stmt);
  return orig_info != NULL && STMT_VINFO_IN_PATTERN_P (orig_info);
}
/* Return true if BB is a loop header.  In checking builds, a non-header
   block is asserted to have exactly one predecessor. */
static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb == (bb->loop_father)->header)
    return true;
  gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
  return false;
}
/* Return 2 raised to the power X; any X <= 0 yields 1. */
static inline int
vect_pow2 (int x)
{
  int result = 1;
  while (x-- > 0)
    result *= 2;
  return result;
}
/* Alias targetm.vectorize.builtin_vectorization_cost: ask the target for
   the cost of a stmt of kind TYPE_OF_COST operating on VECTYPE with
   misalignment MISALIGN (VECTYPE may be NULL, see vect_get_stmt_cost). */
static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
			    tree vectype, int misalign)
{
  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
						       vectype, misalign);
}
/* Get cost by calling the cost target builtin, with no vector type and
   zero misalignment. */
static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  return builtin_vectorization_cost (type_of_cost, NULL, 0);
}
/* Alias targetm.vectorize.init_cost: create the target's opaque cost-model
   data for LOOP_INFO (stored in vec_info::target_cost_data). */
static inline void *
init_cost (struct loop *loop_info)
{
  return targetm.vectorize.init_cost (loop_info);
}
/* Alias targetm.vectorize.add_stmt_cost: record COUNT stmts of kind KIND
   for STMT_INFO with misalignment MISALIGN into the cost data DATA, in the
   prologue/body/epilogue part selected by WHERE. */
static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
	       stmt_vec_info stmt_info, int misalign,
	       enum vect_cost_model_location where)
{
  return targetm.vectorize.add_stmt_cost (data, count, kind,
					  stmt_info, misalign, where);
}
/* Alias targetm.vectorize.finish_cost: finalize the cost data DATA and
   write the accumulated prologue, body and epilogue costs to the three
   output parameters. */
static inline void
finish_cost (void *data, unsigned *prologue_cost,
	     unsigned *body_cost, unsigned *epilogue_cost)
{
  targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost);
}
/* Alias targetm.vectorize.destroy_cost_data: release the opaque cost data
   previously created by init_cost. */
static inline void
destroy_cost_data (void *data)
{
  targetm.vectorize.destroy_cost_data (data);
}
/*-----------------------------------------------------------------*/
/* Info on data references alignment. */
/*-----------------------------------------------------------------*/
/* Record misalignment VAL for data reference DR, lazily allocating its
   dataref_aux (XCNEW zero-initializes the remaining fields) on first
   use. */
inline void
set_dr_misalignment (struct data_reference *dr, int val)
{
  dataref_aux *data_aux = DR_VECT_AUX (dr);
  if (!data_aux)
    {
      data_aux = XCNEW (dataref_aux);
      dr->aux = data_aux;
    }
  data_aux->misalignment = val;
}
/* Return the recorded misalignment of DR.  Requires DR->aux to have been
   allocated already (e.g. by set_dr_misalignment); dereferences it
   unconditionally. */
inline int
dr_misalignment (struct data_reference *dr)
{
  return DR_VECT_AUX (dr)->misalignment;
}
/* Reflects actual alignment of first access in the vectorized loop,
taking into account peeling/versioning if applied. */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
/* Return TRUE if the data access is aligned, and FALSE otherwise. */
static inline bool
aligned_access_p (struct data_reference *data_ref_info)
{
return (DR_MISALIGNMENT (data_ref_info) == 0);
}
/* Return TRUE if the alignment of the data access is known, and FALSE
   otherwise; a misalignment of -1 is the "unknown" sentinel. */
static inline bool
known_alignment_for_access_p (struct data_reference *data_ref_info)
{
  return (DR_MISALIGNMENT (data_ref_info) != -1);
}
/* Return true if the vect cost model is unlimited.  For a loop the user
   marked force_vectorize, the simd cost model flag takes precedence when
   it is not DEFAULT; otherwise the general vector cost model flag is
   consulted. */
static inline bool
unlimited_cost_model (loop_p loop)
{
  if (loop != NULL && loop->force_vectorize
      && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
}
/* Source location */
extern source_location vect_location;
/*-----------------------------------------------------------------*/
/* Function prototypes. */
/*-----------------------------------------------------------------*/
/* Simple loop peeling and versioning utilities for vectorizer's purposes -
in tree-vect-loop-manip.c. */
extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
struct loop *, edge);
extern void vect_loop_versioning (loop_vec_info, unsigned int, bool);
extern void vect_do_peeling_for_loop_bound (loop_vec_info, tree, tree,
unsigned int, bool);
extern void vect_do_peeling_for_alignment (loop_vec_info, tree,
unsigned int, bool);
extern source_location find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);
/* In tree-vect-stmts.c. */
extern unsigned int current_vector_size;
extern tree get_vectype_for_scalar_type (tree);
extern tree get_mask_type_for_scalar_type (tree);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_is_simple_use (tree, vec_info *, gimple **,
enum vect_def_type *);
extern bool vect_is_simple_use (tree, vec_info *, gimple **,
enum vect_def_type *, tree *);
extern bool supportable_widening_operation (enum tree_code, gimple *, tree,
tree, enum tree_code *,
enum tree_code *, int *,
vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
enum tree_code *,
int *, vec<tree> *);
extern stmt_vec_info new_stmt_vec_info (gimple *stmt, vec_info *);
extern void free_stmt_vec_info (gimple *stmt);
extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern void vect_model_store_cost (stmt_vec_info, int, bool,
enum vect_def_type, slp_tree,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern void vect_model_load_cost (stmt_vec_info, int, bool, slp_tree,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
enum vect_cost_for_stmt, stmt_vec_info,
int, enum vect_cost_model_location);
extern void vect_finish_stmt_generation (gimple *, gimple *,
gimple_stmt_iterator *);
extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info);
extern tree vect_get_vec_def_for_operand (tree, gimple *, tree = NULL);
extern tree vect_init_vector (gimple *, tree, tree,
gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree);
extern bool vect_transform_stmt (gimple *, gimple_stmt_iterator *,
bool *, slp_tree, slp_instance);
extern void vect_remove_stores (gimple *);
extern bool vect_analyze_stmt (gimple *, bool *, slp_tree);
extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *,
gimple **, tree, int, slp_tree);
extern bool vectorizable_comparison (gimple *, gimple_stmt_iterator *,
gimple **, tree, int, slp_tree);
extern void vect_get_load_cost (struct data_reference *, int, bool,
unsigned int *, unsigned int *,
stmt_vector_for_cost *,
stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (struct data_reference *, int,
unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
extern void vect_get_vec_defs (tree, tree, gimple *, vec<tree> *,
vec<tree> *, slp_tree, int);
extern tree vect_gen_perm_mask_any (tree, const unsigned char *);
extern tree vect_gen_perm_mask_checked (tree, const unsigned char *);
extern void optimize_mask_stores (struct loop*);
/* In tree-vect-data-refs.c. */
extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
extern enum dr_alignment_support vect_supportable_dr_alignment
(struct data_reference *, bool);
extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *,
HOST_WIDE_INT *);
extern bool vect_analyze_data_ref_dependences (loop_vec_info, int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern bool vect_enhance_data_refs_alignment (loop_vec_info);
extern bool vect_analyze_data_refs_alignment (loop_vec_info);
extern bool vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern bool vect_analyze_data_ref_accesses (vec_info *);
extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
extern tree vect_check_gather_scatter (gimple *, loop_vec_info, tree *, tree *,
int *);
extern bool vect_analyze_data_refs (vec_info *, int *);
extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree,
tree *, gimple_stmt_iterator *,
gimple **, bool, bool *,
tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, gimple *,
tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_grouped_load_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern void vect_permute_store_chain (vec<tree> ,unsigned int, gimple *,
gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (gimple *, gimple_stmt_iterator *, tree *,
enum dr_alignment_support, tree,
struct loop **);
extern void vect_transform_grouped_load (gimple *, vec<tree> , int,
gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (gimple *, vec<tree> );
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
const char * = NULL);
extern tree vect_create_addr_base_for_vector_ref (gimple *, gimple_seq *,
tree, struct loop *,
tree = NULL_TREE);
/* In tree-vect-loop.c. */
/* FORNOW: Used in tree-parloops.c. */
extern void destroy_loop_vec_info (loop_vec_info, bool);
extern gimple *vect_force_simple_reduction (loop_vec_info, gimple *, bool,
bool *, bool);
/* Drive for loop analysis stage. */
extern loop_vec_info vect_analyze_loop (struct loop *);
/* Drive for loop transformation stage. */
extern void vect_transform_loop (loop_vec_info);
extern loop_vec_info vect_analyze_loop_form (struct loop *);
extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *,
gimple **);
extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *,
gimple **, slp_tree);
extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *, gimple **);
extern tree get_initial_def_for_reduction (gimple *, tree, tree *);
extern int vect_min_worthwhile_factor (enum tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
stmt_vector_for_cost *,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
/* In tree-vect-slp.c. */
extern void vect_free_slp_instance (slp_instance);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> ,
gimple_stmt_iterator *, int,
slp_instance, bool);
extern bool vect_slp_analyze_operations (vec<slp_instance> slp_instances,
void *);
extern bool vect_schedule_slp (vec_info *);
extern bool vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (vec<tree> , slp_tree,
vec<vec<tree> > *, int);
extern bool vect_slp_bb (basic_block);
extern gimple *vect_find_last_scalar_stmt_in_slp (slp_tree);
/* In tree-vect-patterns.c. */
/* Pattern recognition functions.
Additional pattern recognition functions can (and will) be added
in the future. */
typedef gimple *(* vect_recog_func_ptr) (vec<gimple *> *, tree *, tree *);
#define NUM_PATTERNS 14
void vect_pattern_recog (vec_info *);
/* In tree-vectorizer.c. */
unsigned vectorize_loops (void);
void vect_destroy_datarefs (vec_info *);
bool vect_stmt_in_region_p (vec_info *, gimple *);
#endif /* GCC_TREE_VECTORIZER_H */
|
GB_binop__isgt_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isgt_int8
// A.*B function (eWiseMult): GB_AemultB__isgt_int8
// A*D function (colscale): GB_AxD__isgt_int8
// D*A function (rowscale): GB_DxB__isgt_int8
// C+=B function (dense accum): GB_Cdense_accumB__isgt_int8
// C+=b function (dense accum): GB_Cdense_accumb__isgt_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int8
// C=scalar+B GB_bind1st__isgt_int8
// C=scalar+B' GB_bind1st_tran__isgt_int8
// C=A+scalar GB_bind2nd__isgt_int8
// C=A'+scalar GB_bind2nd_tran__isgt_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x > y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT8 || GxB_NO_ISGT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: ISGT is not an accumulable operator, so no dense
// C += A+B kernel is generated for this file and the function name is the
// placeholder "(none)".
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__isgt_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B where all three matrices are dense; the loop body comes from the
// shared template, specialized by the GB_* macros defined above.
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__isgt_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
// C += B where C is dense and B is sparse.  The *_slice arrays give a
// precomputed partition of B's entries into ntasks parallel tasks.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__isgt_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b, of type int8_t
    const int nthreads
)
{
    // C += b: accumulate a scalar into a dense matrix, via
    // GB_dense_subassign_22_template.c.
    // Returns GrB_NO_VALUE if this operator/type pairing is disabled.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: the unreachable duplicate "return (GrB_SUCCESS)" emitted by the
    // code generator after the block above has been removed.
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__isgt_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
// C = A*D, column scale: D is diagonal, so column j of A is combined with
// D(j,j) via the ISGT operator.  The *_slice arrays partition A's entries
// into ntasks parallel tasks.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__isgt_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
// C = D*B, row scale: D is diagonal, so row i of B is combined with
// D(i,i) via the ISGT operator.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__isgt_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
// eWiseAdd: C = A+B or C<M> = A+B over the set union of the patterns of
// A and B, applying the ISGT operator where both are present.  M is an
// optional mask (structural if Mask_struct); the C_to_* arrays map C's
// vectors to those of M, A, and B, and TaskList drives the parallel tasks.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__isgt_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
// eWiseMult: C = A.*B or C<M> = A.*B over the set intersection of the
// patterns of A and B, applying the ISGT operator to each matching pair.
// M is an optional mask (structural if Mask_struct).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__isgt_int8
(
GB_void *Cx_output,
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
    // Cx [k] = (x > Bx [k]) for k = 0..anz-1, with the scalar x bound as the
    // first operand of the ISGT operator.  Cx and Bx may be aliased.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cz = (int8_t *) Cx_output ;
    const int8_t *Bz = (const int8_t *) Bx_input ;
    const int8_t xval = (*((const int8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cz [k] = (xval > Bz [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__isgt_int8
(
GB_void *Cx_output,
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
    // Cx [k] = (Ax [k] > y) for k = 0..anz-1, with the scalar y bound as the
    // second operand of the ISGT operator.  Cx and Ax may be aliased.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cz = (int8_t *) Cx_output ;
    const int8_t *Az = (const int8_t *) Ax_input ;
    const int8_t yval = (*((const int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cz [k] = (Az [k] > yval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB_bind1st_tran__isgt_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// C = op (x, A'): transpose A while applying GB_CAST_OP (defined above)
// to each entry, with the scalar x bound as the first operand.
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y),
// so GB_ATYPE is temporarily redefined here to the B type.
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB_bind2nd_tran__isgt_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// C = op (A', y): transpose A while applying GB_CAST_OP (defined above)
// to each entry, with the scalar y bound as the second operand.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
// device-side copy of pi, placed in CUDA constant memory
__constant__ const float PI = 3.14159265358979323846;
#else
// host-side pi constant
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
// Number of parallel workers used for a kernel of N iterations on device
// xpu; specialized for cpu and gpu below.
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
// Grid-stride loop over n elements: each thread starts at its global id
// and advances by the total number of threads in the grid.
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Query the properties of the currently selected CUDA device.
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
 * \brief Get the number of blocks for cuda kernel given N
 */
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
// ceil(N / kBaseThreadNum), capped at the maximum grid size
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
// GPU specialization: total thread count = blocks * threads-per-block.
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
// CPU specialization: N is unused; the thread count is whatever the engine
// recommends for OpenMP, independent of the workload size.
template<>
inline int get_num_threads<cpu>(const int N) {
return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch: dispatch a runtime OpReqType to a
 * compile-time constant `ReqType` visible inside __VA_ARGS__.
 * kWriteInplace is mapped to kWriteTo; kNullOp and unknown values execute
 * nothing.
 */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
/*! \brief operator request type switch: like MXNET_ASSIGN_REQ_SWITCH, but
 * also executes __VA_ARGS__ for kNullOp (with ReqType == kNullOp) instead
 * of skipping it.
 */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
{ \
const OpReqType ReqType = kNullOp; \
{__VA_ARGS__} \
} \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
/* Dispatch a runtime dimensionality NDim (1..5) to the compile-time
 * constant `ndim` usable inside __VA_ARGS__.  NDim == 0 executes nothing;
 * larger values abort with a fatal log.  (Message fixed: it previously
 * rendered as e.g. "ndim=6too large " with no separating space.) */
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << " too large"; \
}
/* Extended version of MXNET_NDIM_SWITCH supporting NDim up to 10.
 * (Message fixed: it previously rendered as e.g. "ndim=11too large ".) */
#define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else if (NDim == 6) { \
const int ndim = 6; \
{__VA_ARGS__} \
} else if (NDim == 7) { \
const int ndim = 7; \
{__VA_ARGS__} \
} else if (NDim == 8) { \
const int ndim = 8; \
{__VA_ARGS__} \
} else if (NDim == 9) { \
const int ndim = 9; \
{__VA_ARGS__} \
} else if (NDim == 10) { \
const int ndim = 10; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << " too large"; \
}
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
LOG(FATAL) << "This operation does not " \
"support float16"; \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
// Accumulation type trait: the type in which values of T are accumulated.
template <typename T>
struct AccType {
using type = T;
};
// half-precision values accumulate in float to limit rounding error
template <>
struct AccType<mshadow::half::half_t> {
using type = float;
};
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not uint8"; \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not int8"; \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int32_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int32"; \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int64"; \
} \
break; \
case mshadow::kBool: \
{ \
typedef bool DType; \
typedef int64_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not bool"; \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kBool: \
{ \
typedef bool DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Invalid loading enum type " << type; \
}
/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 * kNullOp (and any unknown request) leaves out untouched; kAddTo
 * accumulates instead of overwriting.
 */
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
// Register all numeric dtype names on a parameter's enum field.
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
// Same as MXNET_ADD_ALL_TYPES, plus the bool dtype.
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("bool", mshadow::kBool)
/* \brief Compute flattened index given coordinates and shape.
 * Note the (shape[i] > coord[i]) factor: a coordinate that is out of range
 * for its axis (coord[i] >= shape[i]) contributes 0 — presumably so that a
 * coordinate from a larger broadcast-compatible shape can be raveled
 * directly against a shape with size-1 axes (TODO confirm against callers).
 */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
}
return ret;
}
/* Compute coordinates from a flattened index, given the shape — the
 * inverse of ravel() for in-range indices.  The loop counter is a signed
 * int (matching the other loops in this file) so the `i >= 0` termination
 * test is well-defined even if index_t is configured as an unsigned type,
 * in which case `index_t i; i >= 0` would never be false. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  index_t j = idx;
  #pragma unroll
  for (int i = ndim-1; i >= 0; --i) {
    const auto tmp = j / shape[i];  // quotient strips axis i from the index
    ret[i] = j - tmp*shape[i];      // remainder is the coordinate on axis i
    j = tmp;
  }
  return ret;
}
/* Compute dot product of two vectors: sum over i of coord[i] * stride[i].
 * Used to turn a coordinate into a flat offset under arbitrary strides. */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret += coord[i] * stride[i];
}
return ret;
}
/* Combining unravel and dot: equivalent to dot(unravel(idx, shape), stride)
 * without materializing the intermediate coordinate.  As in unravel(), the
 * loop counter is a signed int so `i >= 0` terminates even if index_t is
 * configured as an unsigned type. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
                                    const Shape<ndim>& stride) {
  index_t ret = 0;
  index_t j = idx;
  #pragma unroll
  for (int i = ndim-1; i >= 0; --i) {
    const auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];  // coordinate on axis i, times its stride
    j = tmp;
  }
  return ret;
}
/* Calculate stride of each dim from shape (row-major).
 * Axes of size 1 get stride 0, so advancing a coordinate on a size-1
 * (broadcast) axis does not move the flat index. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
/* Increment coordinates by one in row-major order (last axis fastest) and
 * update the flat index in step, carrying into the next-higher axis each
 * time an axis overflows its shape. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx, const Shape<ndim>& stride) {
++(*coord)[ndim-1];
*idx += stride[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
// carry: reset this axis and bump the next one, adjusting idx to match
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx = *idx + stride[i-1] - shape[i] * stride[i];
}
}
/* Increment coordinates by one (row-major) while maintaining two flat
 * indices with independent strides — used when walking two tensors of the
 * same logical shape but different layouts in lockstep. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx1, const Shape<ndim>& stride1,
index_t* idx2, const Shape<ndim>& stride2) {
++(*coord)[ndim-1];
*idx1 += stride1[ndim-1];
*idx2 += stride2[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
// carry into axis i-1, adjusting both flat indices
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
*idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
}
}
/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 * Sizes and device masks must match.  If the dtypes differ, the copy goes
 * through an elementwise cast expression instead of a raw memcpy-style copy.
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
} else {
MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Binary op backward gradient OP wrapper: chain rule helper that
 * multiplies the incoming output-gradient by GRAD_OP applied to the
 * remaining arguments. */
template<typename GRAD_OP>
struct backward_grad {
/* \brief Backward calc with grad
 * \param a - output grad
 * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
 * \return input grad
 */
template<typename DType, typename ...Args>
MSHADOW_XINLINE static DType Map(DType a, Args... args) {
return DType(a * GRAD_OP::Map(args...));
}
};
/*! \brief Binary op backward gradient OP wrapper (tuned): same Map as
 * backward_grad, but also derives from `tunable` so Kernel::Launch selects
 * the OMP-tuned launch path for it. */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 * Each Map overload forwards element i through OP::Map and stores the
 * result via KERNEL_ASSIGN, honoring the req (write/add/null) policy.
 */
template<typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is tensor and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief input is tensor and two scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
const DType value_1, const DType value_2) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
}
/*! \brief No inputs (ie fill to constant value) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
/*! \brief input is single scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1, const DType *input_2, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (ie backward grad with binary grad function) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1,
const DType *input_2,
const DType *input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
/*! \brief input is a tensor and the output is a boolean tensor */
template<typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors with a boolean output tensor */
template<typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is a tensor and a scalar value with a boolean output tensor */
template<typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief inputs are two tensors with a float output tensor */
template<typename DType,
typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is a tensor and a scalar value with a float output tensor */
template<typename DType,
typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
};
template<typename OP, typename xpu>
struct Kernel;
/*!
* \brief CPU Kernel launcher
* \tparam OP Operator to launch
*/
template<typename OP>
struct Kernel<OP, cpu> {
/*!
* \brief Launch a generic CPU kernel.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
* for irregular workloads such as spmv.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
if (omp_threads < 2) {
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch CPU kernel which has OMP tuning data available.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam PRIMITIVE_OP The primitive operation to use for tuning
* \tparam DType Data type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename PRIMITIVE_OP, typename DType, typename ...Args>
static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
N, static_cast<size_t>(omp_threads))) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch custom-tuned kernel where each thread is set to
* operate on a contiguous partition
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
*/
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
OP::Map(0, N, args...);
} else {
const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type (inferred from `dest`; selects the tuning data)
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   * \note Enabled via SFINAE only when T (default: the struct's OP) derives
   *       from `tunable`; see the sibling overload for op_with_req wrappers.
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    // Defer to the tuned launcher, which decides serial vs. OpenMP execution.
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }
  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type (its nested `Operation` is the tunable primitive)
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   * \note Enabled via SFINAE only when T::Operation derives from `tunable`,
   *       i.e. when OP is a wrapper such as op_with_req rather than the
   *       primitive op itself.
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    // Tune on the wrapped primitive operation, not the wrapper type.
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};
#ifdef __CUDACC__
/*! \brief Generic GPU kernel: calls OP::Map(i, args...) once for each i in [0, N). */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  // Each thread starts at its global index and advances by the total number
  // of launched threads until the whole range is covered.
  const int step = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += step) {
    OP::Map(idx, args...);
  }
}
/*! \brief Variant of the generic GPU kernel for ops whose Map takes an
 *  (index, count) pair; each thread processes single-element chunks. */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  const int step = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += step) {
    OP::Map(idx, 1, args...);  // count is always 1 on GPU
  }
}
/*! \brief GPU specialization: dispatches to the generic CUDA kernels. */
template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch the generic GPU kernel (one OP::Map(i, ...) per index). */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;  // nothing to do; avoid launching an empty grid
    using namespace mshadow::cuda;
    // Enough blocks to cover N indices, capped at the device grid limit.
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }
  /*! \brief Launch the "ex" GPU kernel, whose OP::Map takes (index, count). */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif // __CUDACC__
/*!
 * \brief Fill kernel that writes the compile-time scalar `val` to each element.
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  /*! \brief mxnet_op form, used directly with Kernel<>::Launch(). */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = static_cast<DType>(val);
  }
  /*! \brief mshadow_op form, used through op_with_req<>. */
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;  // fill destination with 0
using set_one = set_to_int<1>;   // fill destination with 1
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
info.c | // RUN: %libomptarget-compile-nvptx64-nvidia-cuda -gline-tables-only && env LIBOMPTARGET_INFO=63 %libomptarget-run-nvptx64-nvidia-cuda 2>&1 | %fcheck-nvptx64-nvidia-cuda -allow-empty -check-prefix=INFO
// REQUIRES: nvptx64-nvidia-cuda
#include <stdio.h>
#include <omp.h>
#define N 64
#pragma omp declare target
int global;
#pragma omp end declare target
extern void __tgt_set_info_flag(unsigned);
int main() {
  int A[N];
  int B[N];
  int C[N];
  int val = 1;
  // The "// INFO:" lines below are FileCheck patterns, matched in order
  // against the runtime's output when LIBOMPTARGET_INFO=63 (see RUN line).
// INFO: CUDA device 0 info: Device supports up to {{[0-9]+}} CUDA blocks and {{[0-9]+}} threads with a warp size of {{[0-9]+}}
// INFO: Libomptarget device 0 info: Entering OpenMP data region at info.c:{{[0-9]+}}:{{[0-9]+}} with 3 arguments:
// INFO: Libomptarget device 0 info: alloc(A[0:64])[256]
// INFO: Libomptarget device 0 info: tofrom(B[0:64])[256]
// INFO: Libomptarget device 0 info: to(C[0:64])[256]
// INFO: Libomptarget device 0 info: Creating new map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, RefCount=1, Name=A[0:64]
// INFO: Libomptarget device 0 info: Creating new map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, RefCount=1, Name=B[0:64]
// INFO: Libomptarget device 0 info: Copying data from host to device, HstPtr={{.*}}, TgtPtr={{.*}}, Size=256, Name=B[0:64]
// INFO: Libomptarget device 0 info: Creating new map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, RefCount=1, Name=C[0:64]
// INFO: Libomptarget device 0 info: Copying data from host to device, HstPtr={{.*}}, TgtPtr={{.*}}, Size=256, Name=C[0:64]
// INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:{{[0-9]+}}:{{[0-9]+}}:
// INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) RefCount Declaration
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 C[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 B[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 A[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: Entering OpenMP kernel at info.c:{{[0-9]+}}:{{[0-9]+}} with 1 arguments:
// INFO: Libomptarget device 0 info: firstprivate(val)[4]
// INFO: CUDA device 0 info: Launching kernel __omp_offloading_{{.*}}main{{.*}} with {{[0-9]+}} blocks and {{[0-9]+}} threads in Generic mode
// INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:{{[0-9]+}}:{{[0-9]+}}:
// INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) RefCount Declaration
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 C[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 B[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 A[0:64] at info.c:{{[0-9]+}}:{{[0-9]+}}
// INFO: Libomptarget device 0 info: Exiting OpenMP data region at info.c:{{[0-9]+}}:{{[0-9]+}} with 3 arguments:
// INFO: Libomptarget device 0 info: alloc(A[0:64])[256]
// INFO: Libomptarget device 0 info: tofrom(B[0:64])[256]
// INFO: Libomptarget device 0 info: to(C[0:64])[256]
// INFO: Libomptarget device 0 info: Copying data from device to host, TgtPtr={{.*}}, HstPtr={{.*}}, Size=256, Name=B[0:64]
// INFO: Libomptarget device 0 info: Removing map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=C[0:64]
// INFO: Libomptarget device 0 info: Removing map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=B[0:64]
// INFO: Libomptarget device 0 info: Removing map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=A[0:64]
// INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:[[#%u,]]:[[#%u,]]:
// INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) RefCount Declaration
// INFO: Libomptarget device 0 info: [[#%#x,]] [[#%#x,]] 4 INF unknown at unknown:0:0
#pragma omp target data map(alloc:A[0:N]) map(tofrom:B[0:N]) map(to:C[0:N])
#pragma omp target firstprivate(val)
  { val = 1; }
  // Disable all info output; the target region below must produce none
  // (checked by the INFO-NOT pattern).
  __tgt_set_info_flag(0x0);
// INFO-NOT: Libomptarget device 0 info: {{.*}}
#pragma omp target
  { }
  return 0;
}
|
pngquant.c | /* pngquant.c - quantize the colors in an alphamap down to a specified number
**
** Copyright (C) 1989, 1991 by Jef Poskanzer.
**
** Permission to use, copy, modify, and distribute this software and its
** documentation for any purpose and without fee is hereby granted, provided
** that the above copyright notice appear in all copies and that both that
** copyright notice and this permission notice appear in supporting
** documentation. This software is provided "as is" without express or
** implied warranty.
**
** - - - -
**
** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider.
** © 2009-2015 by Kornel Lesiński.
**
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without modification,
** are permitted provided that the following conditions are met:
**
** 1. Redistributions of source code must retain the above copyright notice,
** this list of conditions and the following disclaimer.
**
** 2. Redistributions in binary form must reproduce the above copyright notice,
** this list of conditions and the following disclaimer in the documentation
** and/or other materials provided with the distribution.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
** AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
** SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
** CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
** OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**
*/
#define PNGQUANT_VERSION LIQ_VERSION_STRING " (October 2015)"
#define PNGQUANT_USAGE "\
usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\
pngquant [options] [ncolors] - >stdout <stdin\n\n\
options:\n\
--force overwrite existing output files (synonym: -f)\n\
--skip-if-larger only save converted files if they're smaller than original\n\
--output file destination file path to use instead of --ext (synonym: -o)\n\
--ext new.png set custom suffix/extension for output filenames\n\
--quality min-max don't save below min, use fewer colors below max (0-100)\n\
--speed N speed/quality trade-off. 1=slow, 3=default, 11=fast & rough\n\
--nofs disable Floyd-Steinberg dithering\n\
--posterize N output lower-precision color (e.g. for ARGB4444 output)\n\
--verbose print status messages (synonym: -v)\n\
\n\
Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\
The output filename is the same as the input name except that\n\
it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\
input is stdin, in which case the quantized image will go to stdout).\n\
The default behavior if the output file exists is to skip the conversion;\n\
use --force to overwrite. See man page for full list of options.\n"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <getopt.h>
#include <unistd.h>
extern char *optarg;
extern int optind, opterr;
#if defined(WIN32) || defined(__WIN32__)
# include <fcntl.h> /* O_BINARY */
# include <io.h> /* setmode() */
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
#include "rwpng.h" /* typedefs, common macros, public prototypes */
#include "lib/libimagequant.h"
// Per-run settings, filled in by main() and consumed by pngquant_file().
struct pngquant_options {
    liq_attr *liq;                           // libimagequant attributes handle
    liq_image *fixed_palette_image;          // optional --map palette source image
    liq_log_callback_function *log_callback; // NULL unless verbose logging is enabled
    void *log_callback_user_info;            // opaque context passed to log_callback
    float floyd;                             // dithering level, 0..1 (0 = ordered/none)
    bool using_stdin, using_stdout, force, fast_compression, ie_mode,
        min_quality_limit, skip_if_larger,
        verbose;
};
static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, png8_image *output_image);
static void set_palette(liq_result *result, png8_image *output_image);
static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool verbose);
static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options);
static char *add_filename_extension(const char *filename, const char *newext);
static bool file_exists(const char *outname);
/* printf-style status reporting: formats the message and hands it to the
   configured log callback (no-op when logging is disabled).
   Assumes fmt/args form a valid printf invocation (vsnprintf >= 0). */
static void verbose_printf(struct pngquant_options *context, const char *fmt, ...)
{
    if (!context->log_callback) {
        return;
    }
    va_list va;
    // First pass: measure the formatted length (plus the terminating '\0').
    va_start(va, fmt);
    const int needed = vsnprintf(NULL, 0, fmt, va) + 1;
    va_end(va);
    // Second pass: format for real into a right-sized stack buffer.
    char buf[needed];
    va_start(va, fmt);
    vsnprintf(buf, needed, fmt, va);
    va_end(va);
    context->log_callback(context->liq, buf, context->log_callback_user_info);
}
/* Default (non-buffered) log callback: one message per line on stderr. */
static void log_callback(const liq_attr *attr, const char *msg, void* user_info)
{
    fputs(msg, stderr);
    fputc('\n', stderr);
}
#ifdef _OPENMP
#define LOG_BUFFER_SIZE 1300
// Per-thread accumulator for log messages; flushed to stderr in a single
// fwrite by log_callback_buferred_flush().
struct buffered_log {
    int buf_used;              // bytes currently stored in buf
    char buf[LOG_BUFFER_SIZE];
};
/* Write out and reset a thread-local log buffer (no-op when empty). */
static void log_callback_buferred_flush(const liq_attr *attr, void *context)
{
    struct buffered_log *log = context;
    if (0 == log->buf_used) {
        return;
    }
    fwrite(log->buf, 1, log->buf_used, stderr);
    fflush(stderr);
    log->buf_used = 0;
}
/* Append msg (newline-terminated) to a thread-local buffer, flushing first
   if it would not fit. Messages longer than the buffer are truncated. */
static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context)
{
    struct buffered_log *log = context;
    int len = strlen(msg);
    if (len > LOG_BUFFER_SIZE-2) {
        len = LOG_BUFFER_SIZE-2;  // leave room for '\n' and '\0'
    }
    if (len > LOG_BUFFER_SIZE - log->buf_used - 2) {
        log_callback_buferred_flush(attr, log);
    }
    memcpy(&log->buf[log->buf_used], msg, len);
    log->buf_used += len+1;
    log->buf[log->buf_used-1] = '\n';  // replace '\0' slot with the newline...
    log->buf[log->buf_used] = '\0';    // ...and keep the buffer terminated
}
#endif
// Print the version banner plus compile-time feature notes (debug build,
// SSE disabled, OpenMP), then the rwpng/libpng version info.
// The feature lines are spliced into one fprintf format string by the
// preprocessor, so exactly one fprintf call is made.
static void print_full_version(FILE *fd)
{
    fprintf(fd, "pngquant, %s, by Greg Roelofs, Kornel Lesinski.\n"
#ifndef NDEBUG
        " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */
#endif
#if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__))
        " SSE acceleration disabled.\n"
#endif
#if _OPENMP
        " Compiled with OpenMP (multicore support).\n"
#endif
    , PNGQUANT_VERSION);
    rwpng_version_info(fd);
    fputs("\n", fd);
}
/* Print the command-line usage text (PNGQUANT_USAGE) to the given stream. */
static void print_usage(FILE *fd)
{
    fprintf(fd, "%s", PNGQUANT_USAGE);
}
/**
 * Parse a --quality argument into a (limit, target) pair and apply it via
 * liq_set_quality(). Accepted forms:
 *
 * N = automatic quality, uses limit unless force is set (N-N or 0-N)
 * -N = no better than N (same as 0-N)
 * N-M = no worse than N, no better than M
 * N- = no worse than N, perfect if possible (same as N-100)
 *
 * where N,M are numbers between 0 (lousy) and 100 (perfect)
 *
 * Returns false on a malformed string or when liq rejects the range.
 * *min_quality_limit is set when a lower bound (limit > 0) was requested.
 */
static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit)
{
    long limit, target;
    const char *str = quality; char *end;
    long t1 = strtol(str, &end, 10);
    if (str == end) return false; // no leading number at all
    str = end;
    if ('\0' == end[0] && t1 < 0) { // quality="-%d"
        target = -t1;
        limit = 0;
    } else if ('\0' == end[0]) { // quality="%d"
        target = t1;
        limit = t1*9/10; // implicit lower bound at 90% of the target
    } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-"
        target = 100;
        limit = t1;
    } else { // quality="%d-%d"
        // strtol consumes the '-' as the sign of the second number, so a
        // well-formed max value parses as negative (t2 <= 0) and is negated.
        long t2 = strtol(str, &end, 10);
        if (str == end || t2 > 0) return false;
        target = -t2;
        limit = t1;
    }
    *min_quality_limit = (limit > 0);
    return LIQ_OK == liq_set_quality(options, limit, target);
}
// Translation table from pngquant 1.x single-dash options to their current
// long-option spellings; applied to argv by fix_obsolete_options() before
// getopt_long() runs.
static const struct {const char *old; const char *newopt;} obsolete_options[] = {
    {"-fs","--floyd=1"},
    {"-nofs", "--ordered"},
    {"-floyd", "--floyd=1"},
    {"-nofloyd", "--ordered"},
    {"-ordered", "--ordered"},
    {"-force", "--force"},
    {"-noforce", "--no-force"},
    {"-verbose", "--verbose"},
    {"-quiet", "--quiet"},
    {"-noverbose", "--quiet"},
    {"-noquiet", "--verbose"},
    {"-help", "--help"},
    {"-version", "--version"},
    {"-ext", "--ext"},
    {"-speed", "--speed"},
};
/* Rewrite legacy single-dash options (e.g. "-nofs") in argv to their modern
   long-option equivalents, warning on stderr about each replacement.
   Scanning stops at the first argument that begins with "--". */
static void fix_obsolete_options(const unsigned int argc, char *argv[])
{
    const unsigned int table_size = sizeof(obsolete_options)/sizeof(obsolete_options[0]);
    for(unsigned int argn=1; argn < argc; argn++) {
        const char *arg = argv[argn];
        if (arg[0] != '-') continue;       // not an option at all
        if (arg[1] == '-') break;          // stop on first --option or --
        for(unsigned int i=0; i < table_size; i++) {
            if (strcmp(obsolete_options[i].old, argv[argn]) == 0) {
                fprintf(stderr, " warning: option '%s' has been replaced with '%s'.\n", obsolete_options[i].old, obsolete_options[i].newopt);
                argv[argn] = (char*)obsolete_options[i].newopt;
            }
        }
    }
}
// getopt_long codes for options that have no single-character form; starting
// at 1 keeps them clear of the ASCII short-option characters.
enum {arg_floyd=1, arg_ordered, arg_ext, arg_no_force, arg_iebug,
      arg_transbug, arg_map, arg_posterize, arg_skip_larger};
// Long-option table for getopt_long(); the last field is either a short
// option character or one of the arg_* codes above.
static const struct option long_options[] = {
    {"verbose", no_argument, NULL, 'v'},
    {"quiet", no_argument, NULL, 'q'},
    {"force", no_argument, NULL, 'f'},
    {"no-force", no_argument, NULL, arg_no_force},
    {"floyd", optional_argument, NULL, arg_floyd},
    {"ordered", no_argument, NULL, arg_ordered},
    {"nofs", no_argument, NULL, arg_ordered},
    {"iebug", no_argument, NULL, arg_iebug},
    {"transbug", no_argument, NULL, arg_transbug},
    {"ext", required_argument, NULL, arg_ext},
    {"skip-if-larger", no_argument, NULL, arg_skip_larger},
    {"output", required_argument, NULL, 'o'},
    {"speed", required_argument, NULL, 's'},
    {"quality", required_argument, NULL, 'Q'},
    {"posterize", required_argument, NULL, arg_posterize},
    {"map", required_argument, NULL, arg_map},
    {"version", no_argument, NULL, 'V'},
    {"help", no_argument, NULL, 'h'},
    {NULL, 0, NULL, 0},
};
pngquant_error pngquant_file(const char *filename, const char *outname, struct pngquant_options *options);
/**
 * Entry point: parses command-line options into `options`, then quantizes
 * each input file — in parallel via OpenMP when several files are given.
 * Returns SUCCESS (0) or the most recently recorded pngquant_error.
 */
int main(int argc, char *argv[])
{
    struct pngquant_options options = {
        .floyd = 1.f, // floyd-steinberg dithering
    };
    options.liq = liq_attr_create();
    if (!options.liq) {
        fputs("SSE-capable CPU is required for this build.\n", stderr);
        return WRONG_ARCHITECTURE;
    }
    unsigned int error_count=0, skipped_count=0, file_count=0;
    pngquant_error latest_error=SUCCESS;
    const char *newext = NULL, *output_file_path = NULL;
    fix_obsolete_options(argc, argv);
    int opt;
    do {
        opt = getopt_long(argc, argv, "Vvqfhs:Q:o:", long_options, NULL);
        switch (opt) {
            case 'v':
                options.verbose = true;
                break;
            case 'q':
                options.verbose = false;
                break;
            case arg_floyd:
                // --floyd without a value means full-strength dithering.
                options.floyd = optarg ? atof(optarg) : 1.f;
                if (options.floyd < 0 || options.floyd > 1.f) {
                    fputs("--floyd argument must be in 0..1 range\n", stderr);
                    return INVALID_ARGUMENT;
                }
                break;
            case arg_ordered: options.floyd = 0; break;
            case 'f': options.force = true; break;
            case arg_no_force: options.force = false; break;
            case arg_ext: newext = optarg; break;
            case 'o':
                if (output_file_path) {
                    fputs("--output option can be used only once\n", stderr);
                    return INVALID_ARGUMENT;
                }
                output_file_path = optarg; break;
            case arg_iebug:
                // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0.
                liq_set_min_opacity(options.liq, 238);
                fputs(" warning: the workaround for IE6 is deprecated\n", stderr);
                break;
            case arg_transbug:
                liq_set_last_index_transparent(options.liq, true);
                break;
            case arg_skip_larger:
                options.skip_if_larger = true;
                break;
            case 's':
                {
                    int speed = atoi(optarg);
                    if (speed >= 10) {
                        options.fast_compression = true;
                    }
                    if (speed == 11) { // 11 = speed 10 plus no dithering
                        options.floyd = 0;
                        speed = 10;
                    }
                    if (LIQ_OK != liq_set_speed(options.liq, speed)) {
                        fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr);
                        return INVALID_ARGUMENT;
                    }
                }
                break;
            case 'Q':
                if (!parse_quality(optarg, options.liq, &options.min_quality_limit)) {
                    fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr);
                    return INVALID_ARGUMENT;
                }
                break;
            case arg_posterize:
                if (LIQ_OK != liq_set_min_posterization(options.liq, atoi(optarg))) {
                    fputs("Posterization should be number of bits in range 0-4.\n", stderr);
                    return INVALID_ARGUMENT;
                }
                break;
            case arg_map:
                {
                    // Load the fixed-palette source image; its pixels are kept in
                    // options.fixed_palette_image, the temporary wrapper is discarded.
                    png24_image tmp = {};
                    if (SUCCESS != read_image(options.liq, optarg, false, &tmp, &options.fixed_palette_image, false, false)) {
                        fprintf(stderr, " error: unable to load %s", optarg);
                        return INVALID_ARGUMENT;
                    }
                }
                break;
            case 'h':
                print_full_version(stdout);
                print_usage(stdout);
                return SUCCESS;
            case 'V':
                puts(PNGQUANT_VERSION);
                return SUCCESS;
            case -1: break;
            default:
                return INVALID_ARGUMENT;
        }
    } while (opt != -1);
    int argn = optind;
    if (argn >= argc) {
        if (argn > 1) {
            fputs("No input files specified.\n", stderr);
        } else {
            print_full_version(stderr);
        }
        print_usage(stderr);
        return MISSING_ARGUMENT;
    }
    if (options.verbose) {
        liq_set_log_callback(options.liq, log_callback, NULL);
        options.log_callback = log_callback;
    }
    // An optional leading bare number is the color count (e.g. "pngquant 16 ...").
    char *colors_end;
    unsigned long colors = strtoul(argv[argn], &colors_end, 10);
    if (colors_end != argv[argn] && '\0' == colors_end[0]) {
        if (LIQ_OK != liq_set_max_colors(options.liq, colors)) {
            fputs("Number of colors must be between 2 and 256.\n", stderr);
            return INVALID_ARGUMENT;
        }
        argn++;
    }
    if (newext && output_file_path) {
        fputs("--ext and --output options can't be used at the same time\n", stderr);
        return INVALID_ARGUMENT;
    }
    // new filename extension depends on options used. Typically basename-fs8.png
    if (newext == NULL) {
        newext = options.floyd > 0 ? "-ie-fs8.png" : "-ie-or8.png";
        if (!options.ie_mode) {
            newext += 3; /* skip "-ie" */
        }
    }
    if (argn == argc || (argn == argc-1 && 0==strcmp(argv[argn],"-"))) {
        options.using_stdin = true;
        options.using_stdout = !output_file_path;
        argn = argc-1;
    }
    const int num_files = argc-argn;
    if (output_file_path && num_files != 1) {
        fputs("Only one input file is allowed when --output is used\n", stderr);
        return INVALID_ARGUMENT;
    }
#ifdef _OPENMP
    // if there's a lot of files, coarse parallelism can be used
    if (num_files > 2*omp_get_max_threads()) {
        omp_set_nested(0);
        omp_set_dynamic(1);
    } else {
        omp_set_nested(1);
    }
#endif
    #pragma omp parallel for \
        schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error)
    for(int i=0; i < num_files; i++) {
        struct pngquant_options opts = options; // per-file (and per-thread) copy
        opts.liq = liq_attr_copy(options.liq);
        const char *filename = opts.using_stdin ? "stdin" : argv[argn+i];
#ifdef _OPENMP
        struct buffered_log buf = {};
        if (opts.log_callback && omp_get_num_threads() > 1 && num_files > 1) {
            liq_set_log_callback(opts.liq, log_callback_buferred, &buf);
            liq_set_log_flush_callback(opts.liq, log_callback_buferred_flush, &buf);
            // BUGFIX: update the thread-local copy, not the shared `options`.
            // The original wrote options.log_callback/log_callback_user_info
            // here, which raced between threads and left the shared callback
            // context pointing at one thread's stack buffer.
            opts.log_callback = log_callback_buferred;
            opts.log_callback_user_info = &buf;
        }
#endif
        pngquant_error retval = SUCCESS;
        const char *outname = output_file_path;
        char *outname_free = NULL;
        if (!options.using_stdout) {
            if (!outname) {
                outname = outname_free = add_filename_extension(filename, newext);
            }
            if (!options.force && file_exists(outname)) {
                fprintf(stderr, " error: '%s' exists; not overwriting\n", outname);
                retval = NOT_OVERWRITING_ERROR;
            }
        }
        if (SUCCESS == retval) {
            retval = pngquant_file(filename, outname, &opts);
        }
        free(outname_free);
        liq_attr_destroy(opts.liq);
        if (retval) {
            #pragma omp critical
            {
                latest_error = retval;
            }
            if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) {
                skipped_count++;
            } else {
                error_count++;
            }
        }
        ++file_count;
    }
    if (error_count) {
        verbose_printf(&options, "There were errors quantizing %d file%s out of a total of %d file%s.",
                       error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s");
    }
    if (skipped_count) {
        verbose_printf(&options, "Skipped %d file%s out of a total of %d file%s.",
                       skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s");
    }
    if (!skipped_count && !error_count) {
        verbose_printf(&options, "No errors detected while quantizing %d image%s.",
                       file_count, (file_count == 1)? "" : "s");
    }
    liq_image_destroy(options.fixed_palette_image);
    liq_attr_destroy(options.liq);
    return latest_error;
}
/* Quantize a single file end-to-end: read -> quantize/remap -> write.
   `outname` is ignored when writing to stdout. Returns SUCCESS or the
   first pngquant_error encountered. */
pngquant_error pngquant_file(const char *filename, const char *outname, struct pngquant_options *options)
{
    pngquant_error retval = SUCCESS;
    verbose_printf(options, "%s:", filename);
    liq_image *input_image = NULL;
    png24_image input_image_rwpng = {};
    // Keep the decoded 24-bit pixels only when they may be needed later:
    // for --skip-if-larger comparison or as a truecolor stdout fallback.
    bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout
    if (SUCCESS == retval) {
        retval = read_image(options->liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->verbose);
    }
    int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap
    png8_image output_image = {};
    if (SUCCESS == retval) {
        verbose_printf(options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL);
#if USE_LCMS
        if (input_image_rwpng.lcms_status == ICCP) {
            verbose_printf(options, " used embedded ICC profile to transform image to sRGB colorspace");
        } else if (input_image_rwpng.lcms_status == GAMA_CHRM) {
            verbose_printf(options, " used gAMA and cHRM chunks to transform image to sRGB colorspace");
        } else if (input_image_rwpng.lcms_status == ICCP_WARN_GRAY) {
            verbose_printf(options, " warning: ignored ICC profile in GRAY colorspace");
        }
#endif
        // 0.45455 ~= 1/2.2, the expected sRGB-style file gamma after decoding.
        if (input_image_rwpng.gamma != 0.45455) {
            verbose_printf(options, " corrected image from gamma %2.1f to sRGB gamma",
                           1.0/input_image_rwpng.gamma);
        }
        // when using image as source of a fixed palette the palette is extracted using regular quantization
        liq_result *remap = liq_quantize_image(options->liq, options->fixed_palette_image ? options->fixed_palette_image : input_image);
        if (remap) {
            liq_set_output_gamma(remap, 0.45455); // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2
            liq_set_dithering_level(remap, options->floyd);
            retval = prepare_output_image(remap, input_image, &output_image);
            if (SUCCESS == retval) {
                if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) {
                    retval = OUT_OF_MEMORY_ERROR;
                }
                set_palette(remap, &output_image);
                double palette_error = liq_get_quantization_error(remap);
                // A negative value presumably means "not measured" — see the
                // libimagequant documentation for liq_get_quantization_error().
                if (palette_error >= 0) {
                    quality_percent = liq_get_quantization_quality(remap);
                    verbose_printf(options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent);
                }
            }
            liq_result_destroy(remap);
        } else {
            retval = TOO_LOW_QUALITY;
        }
    }
    if (SUCCESS == retval) {
        if (options->skip_if_larger) {
            // this is very rough approximation, but generally avoid losing more quality than is gained in file size.
            // Quality is squared, because even greater savings are needed to justify big quality loss.
            double quality = quality_percent/100.0;
            output_image.maximum_file_size = (input_image_rwpng.file_size-1) * quality*quality;
        }
        output_image.fast_compression = options->fast_compression;
        // Transfer ancillary chunk ownership to the output; clearing the
        // source pointer presumably avoids a double free in the cleanup below.
        output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL;
        retval = write_image(&output_image, NULL, outname, options);
        if (TOO_LARGE_FILE == retval) {
            verbose_printf(options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL);
        }
    }
    if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) {
        // when outputting to stdout it'd be nasty to create 0-byte file
        // so if quality is too low, output 24-bit original
        pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options);
        if (write_retval) {
            retval = write_retval;
        }
    }
    if (input_image) liq_image_destroy(input_image);
    rwpng_free_image24(&input_image_rwpng);
    rwpng_free_image8(&output_image);
    return retval;
}
/* Copy the final quantized palette from the liq result into the PNG output
   structure (PLTE + tRNS). num_trans ends up as 1 past the last palette
   index whose alpha is below 255, i.e. the number of tRNS entries needed. */
static void set_palette(liq_result *result, png8_image *output_image)
{
    const liq_palette *palette = liq_get_palette(result);
    output_image->num_palette = palette->count;
    output_image->num_trans = 0;
    for(unsigned int i=0; i < palette->count; i++) {
        const liq_color entry = palette->entries[i];
        if (entry.a < 255) {
            output_image->num_trans = i+1;
        }
        output_image->palette[i] = (png_color){.red=entry.r, .green=entry.g, .blue=entry.b};
        output_image->trans[i] = entry.a;
    }
}
/* Probe for an existing file by attempting to open it for reading. */
static bool file_exists(const char *outname)
{
    FILE *probe = fopen(outname, "rb");
    if (!probe) {
        return false;
    }
    fclose(probe);
    return true;
}
/* Build the output filename from the input name by replacing a trailing
 * ".png"/".PNG" with `newext` (or appending `newext` when there is no such
 * extension). Returns a malloc'd string, or NULL on allocation failure.
 *
 * Fixes two latent defects in the original:
 *  - filenames shorter than 4 characters caused an out-of-bounds read at
 *    outname+x-4 (now guarded by x >= 4);
 *  - strncpy did not guarantee termination (memcpy of x+1 bytes copies the
 *    terminator, which the strcpy branches then overwrite as needed). */
static char *add_filename_extension(const char *filename, const char *newext)
{
    size_t x = strlen(filename);
    char *outname = (char *)malloc(x + 4 + strlen(newext) + 1);
    if (!outname) return NULL;
    memcpy(outname, filename, x + 1); // include the '\0'
    if (x >= 4 && (strncmp(outname + x - 4, ".png", 4) == 0 ||
                   strncmp(outname + x - 4, ".PNG", 4) == 0)) {
        strcpy(outname + x - 4, newext); // replace the extension
    } else {
        strcpy(outname + x, newext);     // append after the whole name
    }
    return outname;
}
/* Return a malloc'd "<basename>.tmp" string (NULL on allocation failure);
   used so output can be written atomically and renamed into place. */
static char *temp_filename(const char *basename) {
    const size_t len = strlen(basename);
    char *name = (char *)malloc(len + 1 + 4); // basename + ".tmp" + '\0'
    if (!name) return NULL;
    memcpy(name, basename, len);
    memcpy(name + len, ".tmp", 5); // includes the terminator
    return name;
}
/* On Windows, switch stdin/stdout to binary mode so PNG bytes are not
   mangled by CRLF translation; a no-op elsewhere. */
static void set_binary_mode(FILE *fp)
{
#if defined(WIN32) || defined(__WIN32__)
    const int fd = (fp == stdout) ? 1 : 0;
    setmode(fd, O_BINARY);
#else
    (void)fp;
#endif
}
/* Return a pointer to the basename component of a '/'-separated path
   (the whole string when it contains no slash). */
static const char *filename_part(const char *path)
{
    const char *last_slash = strrchr(path, '/');
    return last_slash ? last_slash + 1 : path;
}
/* Move `from` over `to`, returning true on success. On Windows an existing
   destination must be unlinked first because rename() there won't replace. */
static bool replace_file(const char *from, const char *to, const bool force) {
#if defined(WIN32) || defined(__WIN32__)
    if (force) {
        // On Windows rename doesn't replace
        unlink(to);
    }
#endif
    return 0 == rename(from, to);
}
/* Write either the palette image (output_image) or the truecolor fallback
   (output_image24) — exactly one of them is used, 8-bit taking precedence.
   File output goes through a ".tmp" file that is renamed over the
   destination, so a failed write cannot clobber an existing file. */
static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options)
{
    FILE *outfile;
    char *tempname = NULL;
    if (options->using_stdout) {
        set_binary_mode(stdout);
        outfile = stdout;
        if (output_image) {
            verbose_printf(options, " writing %d-color image to stdout", output_image->num_palette);
        } else {
            verbose_printf(options, " writing truecolor image to stdout");
        }
    } else {
        tempname = temp_filename(outname);
        if (!tempname) return OUT_OF_MEMORY_ERROR;
        if ((outfile = fopen(tempname, "wb")) == NULL) {
            fprintf(stderr, " error: cannot open '%s' for writing\n", tempname);
            free(tempname);
            return CANT_WRITE_ERROR;
        }
        if (output_image) {
            verbose_printf(options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname));
        } else {
            verbose_printf(options, " writing truecolor image as %s", filename_part(outname));
        }
    }
    pngquant_error retval;
    // NOTE(review): rwpng/libpng calls are serialized across the OpenMP worker
    // threads (same critical name as in read_image) — presumably because the
    // rwpng layer is not thread-safe; confirm against rwpng.c.
#pragma omp critical (libpng)
    {
        if (output_image) {
            retval = rwpng_write_image8(outfile, output_image);
        } else {
            retval = rwpng_write_image24(outfile, output_image24);
        }
    }
    if (!options->using_stdout) {
        fclose(outfile);
        if (SUCCESS == retval) {
            // Image has been written to a temporary file and then moved over destination.
            // This makes replacement atomic and avoids damaging destination file on write error.
            if (!replace_file(tempname, outname, options->force)) {
                retval = CANT_WRITE_ERROR;
            }
        }
        if (retval) {
            // Don't leave a partial ".tmp" file behind on failure.
            unlink(tempname);
        }
    }
    free(tempname);
    if (retval && retval != TOO_LARGE_FILE) {
        fprintf(stderr, " error: failed writing image to %s\n", outname);
    }
    return retval;
}
/* Open the input (file or stdin), decode it into *input_image_p, and wrap
   the pixels in a liq_image stored in *liq_image_p. Unless keep_input_pixels
   is set, pixel/row ownership is transferred to libimagequant and the
   png24_image's pointers are cleared so they aren't freed twice. */
static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool verbose)
{
    FILE *infile;
    if (using_stdin) {
        set_binary_mode(stdin);
        infile = stdin;
    } else if ((infile = fopen(filename, "rb")) == NULL) {
        fprintf(stderr, " error: cannot open %s for reading\n", filename);
        return READ_ERROR;
    }
    pngquant_error retval;
    // Decoding shares the "libpng" critical section with write_image().
#pragma omp critical (libpng)
    {
        retval = rwpng_read_image24(infile, input_image_p, verbose);
    }
    if (!using_stdin) {
        fclose(infile);
    }
    if (retval) {
        fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? "from stdin" : filename_part(filename));
        return retval;
    }
    *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma);
    if (!*liq_image_p) {
        return OUT_OF_MEMORY_ERROR;
    }
    if (!keep_input_pixels) {
        if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) {
            return OUT_OF_MEMORY_ERROR;
        }
        // libimagequant now owns (and will free) the rows and pixel data.
        input_image_p->row_pointers = NULL;
        input_image_p->rgba_data = NULL;
    }
    return SUCCESS;
}
/* Allocate the 8-bit indexed output buffer (one byte per pixel) plus its
   row-pointer array, and pre-fill palette/transparency counts from the liq
   result. NOTE(review): set_palette() recomputes num_palette/num_trans after
   remapping, overwriting the values set here. */
static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, png8_image *output_image)
{
    output_image->width = liq_image_get_width(input_image);
    output_image->height = liq_image_get_height(input_image);
    output_image->gamma = liq_get_output_gamma(result);
    /*
    ** Step 3.7 [GRR]: allocate memory for the entire indexed image
    */
    output_image->indexed_data = malloc(output_image->height * output_image->width);
    output_image->row_pointers = malloc(output_image->height * sizeof(output_image->row_pointers[0]));
    if (!output_image->indexed_data || !output_image->row_pointers) {
        // Anything already allocated stays in output_image and is released by
        // the caller (pngquant_file frees it via rwpng_free_image8).
        return OUT_OF_MEMORY_ERROR;
    }
    for(size_t row = 0; row < output_image->height; row++) {
        output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width;
    }
    const liq_palette *palette = liq_get_palette(result);
    // tRNS, etc.
    output_image->num_palette = palette->count;
    output_image->num_trans = 0;
    for(unsigned int i=0; i < palette->count; i++) {
        if (palette->entries[i].a < 255) {
            output_image->num_trans = i+1;
        }
    }
    return SUCCESS;
}
|
declare-variant-5.c | /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
/* { dg-additional-options "-mavx2" } */
typedef float __v4sf __attribute__((vector_size (16)));
typedef int __v4si __attribute__((vector_size (16)));
typedef float __v8sf __attribute__((vector_size (32)));
typedef int __v8si __attribute__((vector_size (32)));
__v4si f1 (__v4sf, __v4sf, float *);
__v8si f2 (__v8sf, __v8sf, float *);
__v4si f3 (__v4si, int, __v4si);
#pragma omp declare variant (f1) match (construct={parallel,for,simd(simdlen(4),notinbranch,uniform(z),aligned(z:4 * sizeof (*z)))})
#pragma omp declare variant (f2) match (construct={for,simd(uniform(z),simdlen(8),notinbranch)})
int f4 (float x, float y, float *z);
#pragma omp declare variant (f3) match (construct={simd(simdlen(4),inbranch,linear(y:1))})
int f5 (int x, int y);
void
test (int *x, float *y, float *z, float *w)
{
  /* Loop inside "parallel" + "for simd": context should be able to match the
     f1 variant (construct parallel,for,simd with simdlen(4)).  */
#pragma omp parallel
#pragma omp for simd aligned (w:4 * sizeof (float))
for (int i = 0; i < 1024; i++)
x[i] = f4 (y[i], z[i], w);
  /* Combined parallel-for-simd with an explicit simdlen(4).  */
#pragma omp parallel for simd aligned (w:4 * sizeof (float)) simdlen(4)
for (int i = 1024; i < 2048; i++)
x[i] = f4 (y[i], z[i], w);
  /* Plain simd (no parallel/for): f1's construct list requires parallel and
     for, so only the f2 variant's selector could apply here.  */
#pragma omp simd aligned (w:4 * sizeof (float))
for (int i = 2048; i < 4096; i++)
x[i] = f4 (y[i], z[i], w);
  /* Call under a condition inside a simd loop: matches f3's inbranch
     declare variant for f5.  */
#pragma omp simd
for (int i = 4096; i < 8192; i++)
if (x[i] > 10)
x[i] = f5 (x[i], i);
}
|
GB_binop__bclr_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bclr_uint64
// A.*B function (eWiseMult): GB_AemultB__bclr_uint64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bclr_uint64
// C+=b function (dense accum): GB_Cdense_accumb__bclr_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_uint64
// C=scalar+B GB_bind1st__bclr_uint64
// C=scalar+B' GB_bind1st_tran__bclr_uint64
// C=A+scalar GB_bind2nd__bclr_uint64
// C=A'+scalar GB_bind2nd_tran__bclr_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint64_t, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITCLR (x, y, uint64_t, 64) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_UINT64 || GxB_NO_BCLR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled stub: BCLR is not one of the accumulator ops listed above, so no
// dense ewise3-accum kernel is generated for it.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense: apply GB_BINOP (BITCLR) entrywise
// with no accumulator.  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_Cdense_ewise3_noaccum__bclr_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the shared template does all the work via the GB_* macros defined above
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
// The *_slice arrays describe the ek_slice task decomposition of B.
GrB_Info GB_Cdense_accumB__bclr_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into the dense matrix C.
GrB_Info GB_Cdense_accumb__bclr_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (auto-generation artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no colscale (C = A*D) kernel is generated for the BCLR
// operator; the generic case is used instead.
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no rowscale (C = D*B) kernel is generated for the BCLR
// operator.  The generator emitted the placeholder "(node)" here; corrected
// to "(none)" for consistency with the colscale stub and the header comment.
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, with BITCLR applied where both entries
// are present.  Slice workspaces are freed by GB_FREE_ALL on exit.
GrB_Info GB_AaddB__bclr_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace pointers allocated (if needed) inside the template and
// released by GB_FREE_ALL below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, with BITCLR applied to the
// intersection pattern of A and B.
GrB_Info GB_AemultB__bclr_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace pointers released by GB_FREE_ALL below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = BITCLR (x, Bx [k]) for every entry present in B, with the scalar
// bound as the first operand.  Entries absent from the bitmap Bb are skipped
// (Bb == NULL means every position is present).
GrB_Info GB_bind1st__bclr_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    const uint64_t x = (*((uint64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Bb, k))
        {
            const uint64_t bk = Bx [k] ;
            Cx [k] = GB_BITCLR (x, bk, uint64_t, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = BITCLR (Ax [k], y) for every entry present in A, with the scalar
// bound as the second operand.  Entries absent from the bitmap Ab are skipped
// (Ab == NULL means every position is present).
GrB_Info GB_bind2nd__bclr_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    const uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (!GBB (Ab, k)) continue ;    // skip entries not in the bitmap
        const uint64_t ak = Ax [k] ;
        Cx [k] = GB_BITCLR (ak, y, uint64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (x, aij, uint64_t, 64) ; \
}
// C = op (x, A'): transpose A and apply BITCLR with the scalar as the
// first operand, via the shared unop-transpose template and GB_CAST_OP.
GrB_Info GB_bind1st_tran__bclr_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (aij, y, uint64_t, 64) ; \
}
// C = op (A', y): transpose A and apply BITCLR with the scalar as the
// second operand, via the shared unop-transpose template and GB_CAST_OP.
GrB_Info GB_bind2nd_tran__bclr_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
symv_x_csr_n_hi.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include<stdlib.h>
// y := beta*y + alpha*A*x for a symmetric matrix whose UPPER triangle
// (diagonal included) is stored in CSR.  Each strictly-upper entry (i,col)
// contributes to both y[i] and y[col]; to keep the parallel row loop
// race-free, each thread accumulates into a private copy of y which is
// reduced into y at the end.
// Fix over the original: the alpha_memalign results are now NULL-checked
// (they were dereferenced unconditionally), and allocation happens before y
// is scaled so a failure leaves y untouched.
static alphasparse_status_t
symv_x_csr_n_hi_omp(const ALPHA_Number alpha,
                    const ALPHA_SPMAT_CSR *A,
                    const ALPHA_Number *x,
                    const ALPHA_Number beta,
                    ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;  // must be square
    ALPHA_INT num_threads = alpha_get_thread_num();

    // allocate and zero the per-thread accumulators up front
    ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
    if(y_local == NULL) return ALPHA_SPARSE_STATUS_ALLOC_FAILED;
    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
        if(y_local[i] == NULL)
        {
            // unwind the buffers already allocated, then fail cleanly
            for(ALPHA_INT j = 0; j < i; j++) alpha_free(y_local[j]);
            alpha_free(y_local);
            return ALPHA_SPARSE_STATUS_ALLOC_FAILED;
        }
        memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
    }

    // y *= beta
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mule(y[i], beta);
    }

    // accumulate alpha*A*x into the per-thread buffers
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_Number tmp;
        for(ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ++ai)
        {
            const ALPHA_INT col = A->col_indx[ai];
            if(col < i)
            {
                // entry in the (unstored) lower triangle: ignore
                continue;
            }
            else if(col == i)
            {
                // diagonal entry contributes once
                alpha_setzero(tmp);
                alpha_mul(tmp, alpha, A->values[ai]);
                alpha_madde(y_local[tid][i], tmp, x[col]);
            }
            else
            {
                // strictly-upper entry contributes symmetrically
                alpha_setzero(tmp);
                alpha_mul(tmp, alpha, A->values[ai]);
                alpha_madde(y_local[tid][col], tmp, x[i]);
                alpha_madde(y_local[tid][i], tmp, x[col]);
            }
        }
    }

    // reduce the per-thread buffers into y (parallel over rows: no races)
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT row = 0; row < m; row++)
        for(ALPHA_INT i = 0; i < num_threads; i++)
            alpha_adde(y[row], y_local[i][row]);

    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        alpha_free(y_local[i]);
    }
    alpha_free(y_local);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
// Public entry point (ONAME expands to the exported kernel name).
// Computes y := beta*y + alpha*A*x for a symmetric upper-triangle CSR
// matrix; always dispatches to the OpenMP implementation.
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return symv_x_csr_n_hi_omp(alpha, A, x, beta, y);
}
|
GB_unaryop__lnot_int32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int32_int8
// op(A') function: GB_tran__lnot_int32_int8
// C type: int32_t
// A type: int8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = LNOT (cast (Ax [p])): cast each int8 entry to int32 and apply
// logical-not, i.e. Cx [p] = !(((int32_t) Ax [p]) != 0).
GrB_Info GB_unop__lnot_int32_int8
(
int32_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: GB_GETA ; GB_CASTING ; GB_OP (see macros above)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting and applying LNOT,
// via the shared transpose template (numeric phase 2 only).
GrB_Info GB_tran__lnot_int32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rdiv_fc64
// A.*B function (eWiseMult): GB_AemultB__rdiv_fc64
// A*D function (colscale): GB_AxD__rdiv_fc64
// D*A function (rowscale): GB_DxB__rdiv_fc64
// C+=B function (dense accum): GB_Cdense_accumB__rdiv_fc64
// C+=b function (dense accum): GB_Cdense_accumb__rdiv_fc64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_fc64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_fc64
// C=scalar+B GB_bind1st__rdiv_fc64
// C=scalar+B' GB_bind1st_tran__rdiv_fc64
// C=A+scalar GB_bind2nd__rdiv_fc64
// C=A'+scalar GB_bind2nd_tran__rdiv_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_div (bij, aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC64_div (y, x) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  Note: unlike the other
// kernels in this file, this one carries no GB_DISABLE guard.
void GB_Cdense_ewise3_accum__rdiv_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense: apply RDIV (z = y/x) entrywise
// with no accumulator.
GrB_Info GB_Cdense_ewise3_noaccum__rdiv_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
// The *_slice arrays describe the ek_slice task decomposition of B.
GrB_Info GB_Cdense_accumB__rdiv_fc64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into the dense matrix C.
GrB_Info GB_Cdense_accumb__rdiv_fc64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (auto-generation artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__rdiv_fc64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__rdiv_fc64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, applying RDIV where both entries are
// present.  Slice workspaces are freed by GB_FREE_ALL on exit.
GrB_Info GB_AaddB__rdiv_fc64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace pointers released by GB_FREE_ALL below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying RDIV to the intersection
// pattern of A and B.
GrB_Info GB_AemultB__rdiv_fc64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace pointers released by GB_FREE_ALL below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = rdiv (x, Bx [p]) = Bx [p] / x for every entry present in B
// (RDIV divides its operands in reverse order relative to DIV).
GrB_Info GB_bind1st__rdiv_fc64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap (Bb == NULL means all present)
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = Bx [p] ;
Cx [p] = GB_FC64_div (bij, x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = rdiv (Ax [p], y) = y / Ax [p] for every entry present in A
// (RDIV divides its operands in reverse order relative to DIV).
GrB_Info GB_bind2nd__rdiv_fc64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap (Ab == NULL means all present)
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = Ax [p] ;
Cx [p] = GB_FC64_div (y, aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_div (aij, x) ; \
}
// C = op (x, A'): transpose A and apply RDIV with the scalar as the first
// operand, via the shared unop-transpose template and GB_CAST_OP.
GrB_Info GB_bind1st_tran__rdiv_fc64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_div (y, aij) ; \
}
// C = op (A', y): transpose A and apply RDIV with the scalar as the second
// operand, via the shared unop-transpose template and GB_CAST_OP.
GrB_Info GB_bind2nd_tran__rdiv_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB)
// A.*B function (eWiseMult): GB (_AemultB_08)
// A.*B function (eWiseMult): GB (_AemultB_02)
// A.*B function (eWiseMult): GB (_AemultB_04)
// A.*B function (eWiseMult): GB (_AemultB_bitmap)
// A*D function (colscale): GB (_AxD)
// D*A function (rowscale): GB (_DxB)
// C+=B function (dense accum): GB (_Cdense_accumB)
// C+=b function (dense accum): GB (_Cdense_accumb)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum)
// C=scalar+B GB (_bind1st)
// C=scalar+B' GB (_bind1st_tran)
// C=A+scalar GB (_bind2nd)
// C=A'+scalar GB (_bind2nd_tran)
// C type: GB_ctype
// A type: GB_atype
// A pattern? GB_a_is_pattern
// B type: GB_btype
// B pattern? GB_b_is_pattern
// BinaryOp: GB_binaryop(cij,aij,bij,i,j)
// Placeholder type macros: GB_atype / GB_btype / GB_ctype are substituted
// by the code generator when this template is expanded.
#define GB_ATYPE \
GB_atype
#define GB_BTYPE \
GB_btype
#define GB_CTYPE \
GB_ctype
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
GB_atype_is_btype
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
GB_ctype_is_atype
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
GB_ctype_is_btype
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GB_geta(aij,Ax,pA,A_iso)
// true if values of A are not used
// (fix: removed a stray trailing backslash after the macro body; line
// splicing happens before comment removal, so it silently absorbed the
// following comment line into the macro definition.)
#define GB_A_IS_PATTERN \
GB_a_is_pattern
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GB_getb(bij,Bx,pB,B_iso)
// true if values of B are not used
// (fix: stray trailing backslash removed here as well.)
#define GB_B_IS_PATTERN \
GB_b_is_pattern
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GB_ctype t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
GB_copy_a_to_c(cij,Ax,pA,A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
GB_copy_b_to_c(cij,Bx,pB,B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
GB_binaryop(z,x,y,i,j) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
GB_binaryop_flip
// op is second
#define GB_OP_IS_SECOND \
GB_op_is_second
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
GB_disable
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
if_is_binop_subset
// (if_/endif_ lines are paired markers consumed by the code generator.)
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; work is in the included template.
void GB (_Cdense_ewise3_accum)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
endif_is_binop_subset
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; work is in the included template.
void GB (_Cdense_ewise3_noaccum)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// if_C_dense_update / endif_C_dense_update are generator markers, not C.
if_C_dense_update
{
#include "GB_dense_subassign_23_template.c"
}
endif_C_dense_update
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as an untyped pointer) into the
// dense matrix C. Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// if_C_dense_update / endif_C_dense_update are generator markers, not C.
if_C_dense_update
{
// get the scalar b for C += b, of type GB_btype
GB_btype bwork = (*((GB_btype *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
endif_C_dense_update
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
if_binop_is_semiring_multiplier
// C = A*D: scale the columns of A by the diagonal matrix D.
// Only generated when this binop is also a semiring multiplier.
GrB_Info GB (_AxD)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *restrict Cx = (GB_ctype *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_is_semiring_multiplier
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
if_binop_is_semiring_multiplier
// C = D*B: scale the rows of B by the diagonal matrix D.
// Only generated when this binop is also a semiring multiplier.
GrB_Info GB (_DxB)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *restrict Cx = (GB_ctype *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_is_semiring_multiplier
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, driven by the included
// GB_add_template.c. Workspaces declared here are released by
// GB_FREE_WORKSPACE before returning.
GrB_Info GB (_AaddB)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GB_atype alpha_scalar ;
GB_btype beta_scalar ;
// alpha/beta are only dereferenced for eWiseUnion; otherwise they stay
// uninitialized and unused.
if (is_eWiseUnion)
{
alpha_scalar = (*((GB_atype *) alpha_scalar_in)) ;
beta_scalar = (*((GB_btype *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
if_binop_emult_is_enabled
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B, where C is
// sparse/hypersparse; the work is in the included meta template.
GrB_Info GB (_AemultB_08)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_emult_is_enabled
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
if_binop_emult_is_enabled
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP selects at compile time whether a runtime flipxy flag
// must be honored by swapping the operands.
GrB_Info GB (_AemultB_02)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
endif_binop_emult_is_enabled
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
if_binop_emult_is_enabled
// eWiseMult: C<M> = A.*B, with M sparse/hyper and A, B bitmap/full.
GrB_Info GB (_AemultB_04)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_emult_is_enabled
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
if_binop_emult_is_enabled
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B, where C is bitmap.
GrB_Info GB (_AemultB_bitmap)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_emult_is_enabled
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
if_binop_bind_is_enabled
// Cx [p] = op (x, Bx [p]) for all bnz entries, with the scalar x bound
// as the first operand.
GrB_Info GB (_bind1st)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *Cx = (GB_ctype *) Cx_output ;
GB_atype x = (*((GB_atype *) x_input)) ;
GB_btype *Bx = (GB_btype *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present according to the bitmap Bb
if (!GBB (Bb, p)) continue ;
GB_getb(bij, Bx, p, false) ;
GB_binaryop(Cx [p], x, bij, 0, 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind_is_enabled
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
if_binop_bind_is_enabled
// Cx [p] = op (Ax [p], y) for all anz entries, with the scalar y bound
// as the second operand.
GrB_Info GB (_bind2nd)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GB_ctype *Cx = (GB_ctype *) Cx_output ;
GB_atype *Ax = (GB_atype *) Ax_input ;
GB_btype y = (*((GB_btype *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present according to the bitmap Ab
if (!GBB (Ab, p)) continue ;
GB_geta(aij, Ax, p, false) ;
GB_binaryop(Cx [p], aij, y, 0, 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind_is_enabled
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
if_binop_bind_is_enabled
// C = op (x, A'): transpose A while applying the binary operator with
// the scalar x bound as the first operand.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GB_getb(aij, Ax, pA, false) ; \
GB_binaryop(Cx [pC], x, aij, 0, 0) ; \
}
GrB_Info GB (_bind1st_tran)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GB_btype
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_atype x = (*((const GB_atype *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
GB_atype
}
endif_binop_bind_is_enabled
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
if_binop_bind_is_enabled
// C = op (A', y): transpose A while applying the binary operator with
// the scalar y bound as the second operand.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GB_geta(aij, Ax, pA, false) ; \
GB_binaryop(Cx [pC], aij, y, 0, 0) ; \
}
GrB_Info GB (_bind2nd_tran)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_btype y = (*((const GB_btype *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind_is_enabled
#endif
|
template-for-new-benchmark.c | /**
* template.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "../polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is N=1024. */
#include "template-for-new-benchmark.h"
/* Array initialization. */
/* Array initialization: fill the n-by-n matrix C with the constant 42.
   Fix: the original put a second `#pragma omp parallel for` on the inner
   loop, creating one nested parallel region per row -- disabled by default
   in most OpenMP runtimes, and pure overhead when enabled. A single
   parallel region over the outer loop does the same work. */
static
void init_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
  int i, j;
#pragma omp parallel for private(i, j)
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++)
      C[i][j] = 42;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code: print the live-out matrix C to stderr so the kernel's result
   cannot be dead-code-eliminated; also usable as a correctness check.
   NOTE(review): the newline is emitted on `i % 20 == 0`, i.e. once per
   element on every 20th row; most PolyBench kernels use
   `(i * n + j) % 20 == 0` -- confirm this is intended. */
static
void print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
int i, j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: C[i][j] += 42 over the _PB_N x _PB_N matrix.
   The whole function is timed, including the call and return.
   Fix: the original put a second `#pragma omp parallel for` on the inner
   loop, spawning a nested parallel region per row for no benefit; one
   parallel region over the outer loop performs the identical update. */
static
void kernel_template(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
  int i, j;
#pragma omp parallel for private(i, j)
  for (i = 0; i < _PB_N; i++)
    for (j = 0; j < _PB_N; j++)
      C[i][j] += 42;
}
/* Driver: allocate C, initialize it, time the kernel, print the timer,
   and keep the result live via polybench_prevent_dce. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n);
/* Initialize array(s). */
init_array (n, POLYBENCH_ARRAY(C));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_template (n, POLYBENCH_ARRAY(C));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(C);
return 0;
}
|
GB_unop__atanh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__atanh_fc32_fc32)
// op(A') function: GB (_unop_tran__atanh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = catanhf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = catanhf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = catanhf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATANH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = catanhf (Ax [p]) for all anz entries; Cx and Ax may alias
// because the operation is elementwise in place.
GrB_Info GB (_unop_apply__atanh_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// A is not bitmap: every position 0..anz-1 holds an entry
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = catanhf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = catanhf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = catanhf (A'): transpose A and apply the operator; the traversal
// and workspace handling live in the included GB_unop_transpose.c.
GrB_Info GB (_unop_tran__atanh_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gsrb.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
// Red-black Gauss-Seidel smoother for the variable-coefficient Helmholtz
// operator a*alpha*phi - b*div(beta*grad(phi)), applied in place to the
// grid phi_id using rhs_id as the right-hand side. Supports
// communication-avoiding smoothing when ghosts > 1: each boundary exchange
// is amortized over `ghosts` sub-sweeps, each operating on a ghost region
// that shrinks by one per sub-sweep (ghostsToOperateOn). Four loop-body
// variants are selected at compile time (__GSRB_CONDITIONAL,
// __GSRB_STRIDE2, __GSRB_FP, or the default if-then-else).
// Threading: exactly one of the two `if` clauses is active -- across boxes
// for small boxes, within a box for large ones -- so the nested pragmas do
// not both parallelize at once.
void smooth(domain_type * domain, int level, int phi_id, int rhs_id, double a, double b){
int CollaborativeThreadingBoxSize = 100000; // i.e. never
#ifdef __COLLABORATIVE_THREADING
CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
#endif
int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i < CollaborativeThreadingBoxSize);
int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
int box,s;
int ghosts = domain->ghosts;
// if communication-avoiding, need RHS for stencils in ghost zones
if(ghosts>1)exchange_boundary(domain,level,rhs_id,1,1,1);
for(s=0;s<numSmooths;s+=ghosts){
exchange_boundary(domain,level,phi_id,1,ghosts>1,ghosts>1); // corners/edges if doing communication-avoiding...
// now do ghosts communication-avoiding smooths on each box...
uint64_t _timeStart = CycleTime();
#pragma omp parallel for private(box) if(omp_across_boxes)
for(box=0;box<domain->subdomains_per_rank;box++){
int i,j,k,ss;
int pencil = domain->subdomains[box].levels[level].pencil;
int plane = domain->subdomains[box].levels[level].plane;
//int ghosts = domain->subdomains[box].levels[level].ghosts;
int dim_k = domain->subdomains[box].levels[level].dim.k;
int dim_j = domain->subdomains[box].levels[level].dim.j;
int dim_i = domain->subdomains[box].levels[level].dim.i;
double h2inv = 1.0/(domain->h[level]*domain->h[level]);
// NOTE(review): phi and phi_new alias the same grid while both are
// __restrict__-qualified; this relies on each sub-sweep writing only the
// current color's points while reading the other color -- confirm.
double * __restrict__ phi = domain->subdomains[box].levels[level].grids[ phi_id] + ghosts*(1+pencil+plane); // i.e. [0] = first non ghost zone point
double * __restrict__ phi_new = domain->subdomains[box].levels[level].grids[ phi_id] + ghosts*(1+pencil+plane); // i.e. [0] = first non ghost zone point
double * __restrict__ rhs = domain->subdomains[box].levels[level].grids[ rhs_id] + ghosts*(1+pencil+plane);
double * __restrict__ alpha = domain->subdomains[box].levels[level].grids[__alpha ] + ghosts*(1+pencil+plane);
double * __restrict__ beta_i = domain->subdomains[box].levels[level].grids[__beta_i] + ghosts*(1+pencil+plane);
double * __restrict__ beta_j = domain->subdomains[box].levels[level].grids[__beta_j] + ghosts*(1+pencil+plane);
double * __restrict__ beta_k = domain->subdomains[box].levels[level].grids[__beta_k] + ghosts*(1+pencil+plane);
double * __restrict__ lambda = domain->subdomains[box].levels[level].grids[__lambda] + ghosts*(1+pencil+plane);
double * __restrict__ RedBlack[2] = {domain->subdomains[box].levels[level].RedBlack_FP[0] + ghosts*(1+pencil),
domain->subdomains[box].levels[level].RedBlack_FP[1] + ghosts*(1+pencil)};
// each communication-avoiding sub-sweep operates on one fewer ring of
// ghost cells than the previous one
int ghostsToOperateOn=ghosts-1;
for(ss=s;ss<s+ghosts;ss++,ghostsToOperateOn--){
#if defined(__GSRB_CONDITIONAL)
#warning GSRB on every point with conditional assignment for Red-Black
#pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
for(k=0-ghostsToOperateOn;k<dim_k+ghostsToOperateOn;k++){
for(j=0-ghostsToOperateOn;j<dim_j+ghostsToOperateOn;j++){
#pragma simd always
for(i=0-ghostsToOperateOn;i<dim_i+ghostsToOperateOn;i++){
int ijk = i + j*pencil + k*plane;
int doit = ((i^j^k^ss^1)&1);
double helmholtz = a*alpha[ijk]*phi[ijk]
-b*h2inv*(
beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] )
-beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] )
+beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] )
-beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] )
+beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] )
-beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] )
);
phi_new[ijk] = (doit) ? phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]) : phi[ijk];
}}}
#elif defined(__GSRB_STRIDE2)
#warning GSRB using stride-2 accesses
#pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
for(k=0-ghostsToOperateOn;k<dim_k+ghostsToOperateOn;k++){
for(j=0-ghostsToOperateOn;j<dim_j+ghostsToOperateOn;j++){
for(i=((j^k^ss^1)&1)+1-ghosts;i<dim_i+ghostsToOperateOn;i+=2){ // stride-2 GSRB
int ijk = i + j*pencil + k*plane;
double helmholtz = a*alpha[ijk]*phi[ijk]
-b*h2inv*(
beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] )
-beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] )
+beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] )
-beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] )
+beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] )
-beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] )
);
phi_new[ijk] = phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]);
}}}
#elif defined(__GSRB_FP)
#warning GSRB using pre-computed 1.0/0.0 FP array for Red-Black
#pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
for(k=0-ghostsToOperateOn;k<dim_k+ghostsToOperateOn;k++){
for(j=0-ghostsToOperateOn;j<dim_j+ghostsToOperateOn;j++){int EvenOdd = (k^ss)&1;
for(i=0-ghostsToOperateOn;i<dim_i+ghostsToOperateOn;i++){
int ij = i + j*pencil;
int ijk = i + j*pencil + k*plane;
double helmholtz = a*alpha[ijk]*phi[ijk]
-b*h2inv*(
beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] )
-beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] )
+beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] )
-beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] )
+beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] )
-beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] )
);
phi_new[ijk] = phi[ijk] - RedBlack[EvenOdd][ij]*lambda[ijk]*(helmholtz-rhs[ijk]); // compiler seems to get confused unless there are disjoint read/write pointers
}}}
#else
#warning GSRB using if-then-else on loop indices for Red-Black
#pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
for(k=0-ghostsToOperateOn;k<dim_k+ghostsToOperateOn;k++){
for(j=0-ghostsToOperateOn;j<dim_j+ghostsToOperateOn;j++){
for(i=0-ghostsToOperateOn;i<dim_i+ghostsToOperateOn;i++){
if((i^j^k^ss^1)&1){ // looks very clean when [0] is i,j,k=0,0,0
int ijk = i + j*pencil + k*plane;
double helmholtz = a*alpha[ijk]*phi[ijk]
-b*h2inv*(
beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] )
-beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] )
+beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] )
-beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] )
+beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] )
-beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] )
);
phi_new[ijk] = phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]);
}}}}
#endif
} // ss-loop
} // boxes
domain->cycles.smooth[level] += (uint64_t)(CycleTime()-_timeStart);
} // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
|
implicit_task_data.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// This test checks that values stored in task_data in a barrier_begin event
// are still present in the corresponding barrier_end event.
// Therefore, callback implementations different from the ones in callback.h are necessary.
// This is a test for an issue reported in
// https://github.com/OpenMPToolsInterface/LLVM-openmp/issues/39
#define _BSD_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <omp.h>
#include <ompt.h>
static const char* ompt_thread_t_values[] = {
NULL,
"ompt_thread_initial",
"ompt_thread_worker",
"ompt_thread_other"
};
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_thread_data_t ompt_get_thread_data;
// Spawn a team of four threads; the master sleeps so the workers are
// (very likely) already waiting in the implicit barrier at the end of the
// parallel region when it completes. The CHECK lines below are matched by
// FileCheck and verify that task_id seen at barrier_begin is identical at
// barrier_end.
int main()
{
#pragma omp parallel num_threads(4)
{
#pragma omp master
{
sleep(1);
}
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// master thread implicit barrier at parallel end
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra={{0x[0-f]*}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}
// worker thread implicit barrier at parallel end
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
return 0;
}
// Thread-begin callback: stamp the thread's tool data slot with a fresh
// unique id and announce the thread. A brand-new thread must not carry
// stale tool data, so complain if the slot is already populated.
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  if (thread_data->ptr != NULL)
  {
    printf("%s\n", "0: thread_data initially not null");
  }
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_t_values[thread_type], thread_type, thread_data->value);
}
// Sync-region callback: at scope begin a fresh unique id is stored in
// task_data->value; the matching end event must still observe that id,
// which is what the FileCheck patterns in main() verify.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  if (endpoint == ompt_scope_begin)
  {
    task_data->value = ompt_get_unique_id();
    if (kind == ompt_sync_region_barrier)
    {
      printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
    }
  }
  else if (endpoint == ompt_scope_end)
  {
    if (kind == ompt_sync_region_barrier)
    {
      // parallel_data may be NULL at region end; report 0 in that case
      printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
    }
  }
}
// Wait-phase callback for sync regions: unlike the begin/end callback
// above it does not assign task_data, it only reports the value already
// stored there, so the test can confirm the data survived the barrier.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  if (kind != ompt_sync_region_barrier)
    return;
  if (endpoint == ompt_scope_begin)
  {
    printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
  }
  else if (endpoint == ompt_scope_end)
  {
    // parallel_data may be NULL at region end; report 0 in that case
    printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
  }
}
// Register on_##name as the OMPT callback `name`, using `type` as the
// callback's function-pointer typedef. On failure prints the diagnostic
// matched by the CHECK-NOT lines in main().
#define register_callback_t(name, type) \
do{ \
type f_##name = &on_##name; \
if (ompt_set_callback(name, (ompt_callback_t)f_##name) == \
ompt_set_never) \
printf("0: Could not register callback '" #name "'\n"); \
}while(0)
// Common case: the typedef name is the callback name plus `_t`.
#define register_callback(name) register_callback_t(name, name##_t)
// Tool initializer: look up the runtime entry points and register the
// three callbacks used by this test. Returns 1 (non-zero) for success.
int ompt_initialize(
ompt_function_lookup_t lookup,
ompt_data_t *tool_data)
{
ompt_set_callback_t ompt_set_callback;
ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
register_callback(ompt_callback_sync_region);
// the wait callback shares the sync_region signature, so register it
// with that callback type rather than a _wait-specific one
register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
register_callback(ompt_callback_thread_begin);
printf("0: NULL_POINTER=%p\n", (void*)NULL);
return 1; //success
}
// Tool finalizer: announce runtime shutdown.
void ompt_finalize(ompt_data_t *tool_data)
{
printf("0: ompt_event_runtime_shutdown\n");
}
// Entry point the OpenMP runtime calls to attach this tool; hands back
// the initialize/finalize pair (tool_data initialized to 0).
ompt_start_tool_result_t* ompt_start_tool(
unsigned int omp_version,
const char *runtime_version)
{
static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
return &ompt_start_tool_result;
}
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "ios_error.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2  /* NOTE(review): presumably bits dropped per channel when
                         indexing the color cache -- confirm at use sites */
#else
#define CacheShift 3  /* coarser cache on Apple/iOS builds */
#endif
#define ErrorQueueLength 16  /* entries in the dither error/weight queues */
#define MaxNodes 266817  /* prune one tree level once nodes exceed this */
#define MaxTreeDepth 8  /* one tree level per bit of 8-bit channel precision */
#define NodesInAList 1920  /* NOTE(review): presumably NodeInfo structs per
                              Nodes block -- confirm in GetNodeInfo */
/*
Typdef declarations.
*/
/*
  RGBA pixel with double-precision channels; used for channel sums, cube
  midpoints, and quantization/dither error terms where Quantum precision
  would round.
*/
typedef struct _DoublePixelPacket
{
double
red,
green,
blue,
alpha;
} DoublePixelPacket;
/*
  One node of the color-description tree.  Each node covers a cube of
  RGB(A) space at its level; the tree is 8-way, or 16-way when alpha is
  associated.
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];  /* slots 0-7 used without alpha, 0-15 with alpha */
MagickSizeType
number_unique;  /* n2: pixels classified exactly at this node */
DoublePixelPacket
total_color;  /* QuantumScale-normalized channel sums of those pixels */
double
quantize_error;  /* accumulated distance of pixels from the node center */
size_t
color_number,  /* colormap index assigned to this node's mean color */
id,  /* child slot this node occupies in its parent */
level;  /* tree depth; children of the root are level 1 */
} NodeInfo;
/*
  Singly-linked list of NodeInfo storage blocks (see NodesInAList);
  NOTE(review): presumably nodes are carved from these blocks rather than
  allocated one by one -- confirm in GetNodeInfo.
*/
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
/*
  Working state for one quantization run: the color-description tree,
  pruning thresholds, the current closest-color search target, and
  error-diffusion (dither) state.
*/
typedef struct _CubeInfo
{
NodeInfo
*root;  /* root of the color-description tree */
size_t
colors,  /* leaves currently representing distinct colors */
maximum_colors;  /* requested colormap size */
ssize_t
transparent_index;  /* colormap slot for transparency; -1 when none */
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;  /* color ClosestColor() is matching */
double
distance,  /* smallest squared distance found so far for target */
pruning_threshold,
next_threshold;
size_t
nodes,  /* total nodes allocated (compared against MaxNodes) */
free_nodes,
color_number;  /* colormap index of the closest color found */
NodeInfo
*next_node;
Nodes
*node_queue;  /* chain of node storage blocks */
MemoryInfo
*memory_info;
ssize_t
*cache;  /* NOTE(review): presumably a color->colormap-index cache used
            by dithering (CacheShift) -- confirm at use sites */
DoublePixelPacket
error[ErrorQueueLength];  /* recent errors for error diffusion */
double
weights[ErrorQueueLength];  /* diffusion weights for the error queue */
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;  /* when set, alpha participates (16-way tree) */
ssize_t
x,
y;  /* current dither position */
size_t
depth;  /* current maximum tree depth; reduced when pruning */
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
/* Cube/node constructors. */
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
/* Phase drivers; each returns MagickFalse on failure. */
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *),
SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);
/* Tree traversal and pruning helpers. */
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
/*
  Allocate a QuantizeInfo, seed it with the library defaults, and (when
  image_info is given) overlay the caller's dither and verbosity settings.
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  quantize_info->measure_error=image_info->verbose;
  quantize_info->dither_method=image_info->dither != MagickFalse ?
    RiemersmaDitherMethod : NoDitherMethod;
  {
    const char
      *dither_option;

    /* An explicit "dither" image option overrides the boolean flag. */
    dither_option=GetImageOption(image_info,"dither");
    if (dither_option != (const char *) NULL)
      quantize_info->dither_method=(DitherMethod) ParseCommandOption(
        MagickDitherOptions,MagickFalse,dither_option);
  }
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  Load a pixel into double precision, premultiplying the color channels by
  normalized alpha when the cube associates alpha and the pixel is not
  fully opaque; otherwise copy the channels straight across.
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  if ((cube_info->associate_alpha != MagickFalse) &&
      (GetPixelAlpha(image,pixel) != OpaqueAlpha))
    {
      double
        alpha;

      /* Premultiply: scale each color channel by alpha in [0,1]. */
      alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
      alpha_pixel->red=alpha*GetPixelRed(image,pixel);
      alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
      alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha_pixel->red=(double) GetPixelRed(image,pixel);
  alpha_pixel->green=(double) GetPixelGreen(image,pixel);
  alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
/*
  PixelInfo variant of AssociateAlphaPixel(): premultiply the color
  channels by normalized alpha when alpha is associated and the pixel is
  not fully opaque; otherwise copy the channels straight across.
*/
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->alpha != OpaqueAlpha))
    {
      double
        alpha;

      /* Premultiply: scale each color channel by alpha in [0,1]. */
      alpha=(double) (QuantumScale*pixel->alpha);
      alpha_pixel->red=alpha*pixel->red;
      alpha_pixel->green=alpha*pixel->green;
      alpha_pixel->blue=alpha*pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha_pixel->red=(double) pixel->red;
  alpha_pixel->green=(double) pixel->green;
  alpha_pixel->blue=(double) pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}
/*
  Map a pixel to a child slot at tree level `index`: one bit per channel,
  taken from bit `index` of the 8-bit-scaled channel value (red -> bit 0,
  green -> bit 1, blue -> bit 2, and alpha -> bit 3 when associated).
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    node_id;

  node_id=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) &
    0x01);
  node_id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->green)) >>
    index) & 0x01) << 1);
  node_id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->blue)) >>
    index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    node_id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >>
      index) & 0x01) << 3);
  return(node_id);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
exception);
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
if (SetImageColormap(image,cube_info,exception) == MagickFalse)
return(MagickFalse);
/*
Create a reduced color image.
*/
if (cube_info->quantize_info->dither_method != NoDitherMethod)
(void) DitherImage(image,cube_info,exception);
else
{
CacheView
*image_view;
MagickBooleanType
status;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
/*
Each row works on a private copy of the cube so threads do not share
the closest-color search state (target/distance/color_number).
*/
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
/*
Run-length: count adjacent identical pixels so the lookup below is
done once per run, not once per pixel.
*/
for (count=1; (x+count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
/*
Seed the search with an upper bound larger than any possible
squared distance.
*/
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
/*
Apply the chosen colormap entry to every pixel in the run.
*/
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(
image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(
image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(
image->colormap[index].blue),q);
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(
image->colormap[index].alpha),q);
}
q+=GetPixelChannels(image);
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image,exception);
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
/*
Force the two colormap entries to pure black/white, ordered by the
luma of the existing entries.
*/
intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
QuantumRange;
if (image->colors > 1)
{
intensity=0.0;
if (GetPixelInfoLuma(image->colormap+0) >
GetPixelInfoLuma(image->colormap+1))
intensity=(double) QuantumRange;
}
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image,exception);
/*
Restore the caller's colorspace if quantization ran in another one.
*/
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace,exception);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
/*
  Decide whether alpha participates in classification: only when the image
  blends alpha, and never for two-color grayscale quantization.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=MagickFalse;
  if (image->alpha_trait == BlendPixelTrait)
    associate_alpha=MagickTrue;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
double
bisect;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if (cube_info->quantize_info->colorspace != image->colorspace)
{
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace,exception);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace,
exception);
}
midpoint.red=(double) QuantumRange/2.0;
midpoint.green=(double) QuantumRange/2.0;
midpoint.blue=(double) QuantumRange/2.0;
midpoint.alpha=(double) QuantumRange/2.0;
error.alpha=0.0;
image_view=AcquireVirtualCacheView(image,exception);
/*
Phase 1: classify rows at full depth (MaxTreeDepth) until the tree holds
more than maximum_colors leaves, then prune and fall through to phase 2.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
/*
Run-length: count adjacent identical pixels so the descent below is
done once per run.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((double) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
/*
Track the center of the child cube chosen at each level.
*/
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.alpha*error.alpha);
if (IsNaN(distance) != 0)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel(pixel.alpha);
else
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel((MagickRealType) OpaqueAlpha);
p+=count*GetPixelChannels(image);
}
if (cube_info->colors > cube_info->maximum_colors)
{
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
/*
Phase 2: classify the remaining rows at the (possibly pruned)
cube_info->depth instead of MaxTreeDepth.
*/
for (y++; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((double) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.alpha*error.alpha);
if (IsNaN(distance) != 0)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel(pixel.alpha);
else
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel((MagickRealType) OpaqueAlpha);
p+=count*GetPixelChannels(image);
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if (cube_info->quantize_info->colorspace != image->colorspace)
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
/*
  Allocate a QuantizeInfo seeded with the library defaults; when
  quantize_info is non-NULL, copy its caller-visible settings into the
  clone.
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->measure_error=quantize_info->measure_error;
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
double
pixel;
register double
alpha,
beta,
distance;
register DoublePixelPacket
*magick_restrict q;
register PixelInfo
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
/*
When alpha is associated, compare alpha-weighted (premultiplied)
channel values; otherwise weight both colors equally.
*/
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(double) (QuantumScale*p->alpha);
beta=(double) (QuantumScale*q->alpha);
}
/*
Accumulate the squared distance channel by channel, bailing out as
soon as the partial sum already exceeds the best distance found.
*/
pixel=alpha*p->red-beta*q->red;
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*p->green-beta*q->green;
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*p->blue-beta*q->blue;
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=p->alpha-q->alpha;
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
/*
New best match: record distance and colormap index.
*/
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_options;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only palette (PseudoClass, opaque-or-binary-alpha) images can be
    compressed this way.
  */
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize to the image's current color count at full tree depth; this
    merges duplicate colormap entries and drops unused ones.
  */
  GetQuantizeInfo(&quantize_options);
  quantize_options.number_colors=image->colors;
  quantize_options.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_options,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color holds per-channel sums (scaled by QuantumScale), so
        multiplying by alpha = 1/number_unique and QuantumRange recovers the
        average quantum value.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Color sums were alpha-weighted on accumulation; divide the
                mean by the mean alpha (gamma) to un-premultiply.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /* Track the most-populated non-opaque entry as the image's
                 transparent index. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      /* Assign this node the next colormap slot. */
      node_info->color_number=image->colors++;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with a color cube.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage.  The original do-while dereferenced
    cube_info->node_queue before testing it, crashing on a cube whose node
    queue was never populated (e.g. a partially-constructed cube released on
    an error path); a while loop handles the empty queue safely and is
    otherwise identical.
  */
  while (cube_info->node_queue != (Nodes *) NULL)
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  }
  /* Release the dither cache, if one was allocated. */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /* Invalidate the signature so stale pointers are caught by the asserts
     above on a double-destroy. */
  quantize_info->signature=(~MagickCoreSignature);
  /* RelinquishMagickMemory() returns NULL, which becomes the return value. */
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
%  its corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Release the per-thread error-diffusion buffers and the pointer array
    itself; always returns NULL.  GetMagickResourceLimit() is hoisted out of
    the loop condition -- it is loop-invariant, and the original re-queried
    the resource limit on every iteration.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Allocate one error-diffusion scratch buffer per worker thread; each
    buffer holds two rows (current and previous) of `count` packets.
    Returns NULL on allocation failure (partial allocations are released).
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
    /*
      Zero the error rows: the original left them uninitialized and relied
      on the dither loop's write-before-read ordering; zeroing makes the
      buffers safe regardless of scan order.
    */
    (void) memset(pixels[i],0,2*count*sizeof(**pixels));
  }
  return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
/*
  Map a pixel color to its slot in the dither cache: each channel is reduced
  to its top (8-CacheShift) bits and the reduced channels are packed into one
  index (red in the lowest bit group, then green, blue, and -- when alpha is
  associated -- alpha).
*/
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using serpentine-scan Floyd-Steinberg
    error diffusion: each pixel is snapped to the closest colormap entry and
    the residual error is spread to its unprocessed neighbors with the
    classic 7/16, 5/16, 3/16, 1/16 weights (scaled by the optional
    "dither:diffusion-amount" artifact).  Returns MagickFalse if any pixel
    row could not be read or synced.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Private copy of the cube so per-row search state does not race. */
    cube=(*cube_info);
    /* Alternate the two row buffers; odd rows scan right-to-left (v = -1). */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /* Gather diffused error from the west neighbor (7/16)... */
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*amount*current[u-v].alpha/16;
        }
      /* ...and from the previous row: NE (1/16), N (5/16), NW (3/16). */
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*amount*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      /* NOTE(review): sync per pixel (not per row) is preserved from the
         original; moving it outside the x loop looks safe but is left
         unchanged here. */
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /*
    Bug fix: the original returned MagickTrue unconditionally, discarding the
    status accumulated above and silently hiding pixel-cache read/sync
    failures from the caller.
  */
  return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
ExceptionInfo *);
/*
  Riemersma() recursively walks a Hilbert curve of the given level over the
  image, emitting one RiemersmaDither() step per curve move.  `direction` is
  the orientation of the current sub-curve (encoded with the gravity
  constants); at level 1 each case emits the three moves of the base U-shape,
  and at deeper levels each case interleaves four rotated sub-curves with the
  three connecting moves.  RiemersmaDither() return values are intentionally
  ignored here; the caller checks the final dither step.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}
/*
  RiemersmaDither() processes the pixel at the cube's current (x,y) curve
  position -- adding in the weighted error queue, snapping to the closest
  colormap entry, writing the result, and pushing the new error onto the
  queue -- then advances (x,y) one step in `direction`.  Positions off the
  image (the Hilbert curve covers a power-of-two square larger than the
  image) only advance the cursor.  Returns MagickFalse on pixel-cache
  failure or a cancelled progress monitor.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error: add the exponentially-weighted queue of previous
        quantization errors to this pixel.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue (shift the
        queue left by one, then append the new residual).
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Advance the curve cursor; ForgetGravity (the final step) moves nowhere. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    depth;

  /* Riemersma is the default path; any other dither method falls back to
     serpentine Floyd-Steinberg. */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /* Compute depth = bit length of the larger image dimension, plus one, so
     the 2^depth Hilbert square covers the whole image. */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /* Final step flushes the last pixel; its status is the overall result. */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose a optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  /*
    Pop a zeroed node from the cube's node pool, allocating a fresh chunk of
    NodesInAList nodes when the pool is empty.  Returns NULL on allocation
    failure.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /* Fix: the original leaked `nodes` on this path (it was not yet
             linked into cube_info->node_queue). */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      /* Link the new chunk at the head of the queue so DestroyCubeInfo()
         can release it later. */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /* Error metrics only make sense against a colormap; DirectClass images
     report zero error. */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /* Three channels per pixel contribute to the per-pixel mean. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* NOTE(review): index is not range-checked against image->colors
         before indexing the colormap below -- confirm upstream callers
         guarantee valid pixel indexes. */
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait == BlendPixelTrait)
        {
          /* Weight each channel difference by the respective alphas. */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Publish the three normalized metrics on the image structure. */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /*
    Zero the structure, then install the library defaults: 256 target
    colors, Riemersma dithering, no target colorspace, and no error
    measurement.
  */
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->measure_error=MagickFalse;
  quantize_info->number_colors=256;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K m e a n s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KmeansImage() applies k-means color reduction to an image. This is a
% colorspace clustering or segmentation technique.
%
% The format of the KmeansImage method is:
%
% MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
% const size_t max_iterations,const double tolerance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_colors: number of colors to use as seeds.
%
% o max_iterations: maximum number of iterations while converging.
%
% o tolerance: the maximum tolerance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-thread k-means accumulator -- presumably one entry per cluster holding
  running channel sums, the member count, and the distortion contribution;
  confirm field semantics against KmeansImage(), which is outside this view.
*/
typedef struct _KmeansInfo
{
  double
    red,
    green,
    blue,
    alpha,
    black,
    count,
    distortion;
} KmeansInfo;
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  /*
    Release each per-thread accumulator array, then the pointer array
    itself; returns NULL for convenient assignment at the call site.
  */
  register ssize_t
    n;

  assert(kmeans_info != (KmeansInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (kmeans_info[n] == (KmeansInfo *) NULL)
      continue;
    kmeans_info[n]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[n]);
  }
  return((KmeansInfo **) RelinquishMagickMemory(kmeans_info));
}
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
KmeansInfo
**kmeans_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
sizeof(*kmeans_info));
if (kmeans_info == (KmeansInfo **) NULL)
return((KmeansInfo **) NULL);
(void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
sizeof(**kmeans_info));
if (kmeans_info[i] == (KmeansInfo *) NULL)
return(DestroyKmeansThreadSet(kmeans_info));
}
return(kmeans_info);
}
/*
  KmeansMetric() returns the squared color distance between pixel p of the
  image and cluster mean q, weighted by opacity (gamma): the more
  transparent either color is, the less its RGB/black components count.
*/
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  register double
    gamma,
    metric,
    pixel;

  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /*
        NOTE(review): the alpha difference is in raw quantum units — it is
        not multiplied by QuantumScale like every other channel below, so
        at Q16 it dominates the metric.  Verify this weighting is intended.
      */
      pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ?
        q->alpha : OpaqueAlpha);
      metric+=pixel*pixel;
      /*
        Scale down the remaining channels by how opaque both colors are.
      */
      if (image->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      /*
        Include the black channel and further attenuate by "whiteness"
        (QuantumRange-black) of both colors.
      */
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  /*
    Weight the alpha/black contribution to balance the three RGB channels
    that are accumulated below.
  */
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /*
        Hue is circular: fold differences greater than half a revolution.
      */
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}
MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag "Kmeans/Image"
#define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  const char
    *colors;

  double
    previous_tolerance;

  KmeansInfo
    **kmeans_pixels;  /* per-thread, per-cluster accumulators */

  MagickBooleanType
    verbose,
    status;

  register ssize_t
    n;

  size_t
    number_threads;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Seed the initial cluster means (image->colormap): either from an
    explicit "kmeans:seed-colors" artifact, or from a quick non-dithered
    color quantization of the image itself.
  */
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        colors,  /* shadows the outer artifact string deliberately */
        depth;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /*
        Depth of the color cube is Log4(number_colors)+2.
      */
      colors=number_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      register const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        register const char
          *q;

        /* Isolate the next semicolon-delimited color name. */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed remaining clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            /*
              NOTE(review): alpha is randomized only when the trait is NOT
              BlendPixelTrait — this looks inverted; confirm against the
              analogous accumulation conditions below.
            */
            if (image->alpha_trait != BlendPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement: assign pixels to nearest mean, recompute means,
    until the distortion change drops below tolerance or max_iterations.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    register ssize_t
      i;

    ssize_t
      y;

    /* Reset every thread's accumulators for this iteration. */
    for (i=0; i < (ssize_t) number_threads; i++)
      (void) memset(kmeans_pixels[i],0,image->colors*sizeof(*kmeans_pixels[i]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        register ssize_t
          i;

        ssize_t
          j;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        j=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          /* Early exit: a (near) exact match cannot be improved on. */
          if (min_distance <= MagickEpsilon)
            break;
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              j=i;
            }
        }
        /* Accumulate this pixel into its cluster's per-thread sums. */
        kmeans_pixels[id][j].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][j].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][j].blue+=QuantumScale*GetPixelBlue(image,q);
        /* NOTE(review): condition looks inverted (accumulates alpha when
           the image does NOT blend alpha) — confirm. */
        if (image->alpha_trait != BlendPixelTrait)
          kmeans_pixels[id][j].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][j].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][j].count++;
        kmeans_pixels[id][j].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) j,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (i=1; i < (ssize_t) number_threads; i++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) image->colors; j++)
      {
        kmeans_pixels[0][j].red+=kmeans_pixels[i][j].red;
        kmeans_pixels[0][j].green+=kmeans_pixels[i][j].green;
        kmeans_pixels[0][j].blue+=kmeans_pixels[i][j].blue;
        if (image->alpha_trait != BlendPixelTrait)
          kmeans_pixels[0][j].alpha+=kmeans_pixels[i][j].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][j].black+=kmeans_pixels[i][j].black;
        kmeans_pixels[0][j].count+=kmeans_pixels[i][j].count;
        kmeans_pixels[0][j].distortion+=kmeans_pixels[i][j].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        gamma;

      /* gamma is 1/count (0 for empty clusters, leaving the mean at 0). */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][i].count);
      image->colormap[i].red=gamma*QuantumRange*kmeans_pixels[0][i].red;
      image->colormap[i].green=gamma*QuantumRange*kmeans_pixels[0][i].green;
      image->colormap[i].blue=gamma*QuantumRange*kmeans_pixels[0][i].blue;
      if (image->alpha_trait != BlendPixelTrait)
        image->colormap[i].alpha=gamma*QuantumRange*kmeans_pixels[0][i].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[i].black=gamma*QuantumRange*kmeans_pixels[0][i].black;
      distortion+=kmeans_pixels[0][i].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(thread_stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    /* Converged when the distortion change falls within tolerance. */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  /* Propagate the final colormap indexes back into the pixel channels. */
  return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
/*
  Snap a channel value onto one of `levels` evenly-spaced quantum values.
  NOTE(review): levels == 0 makes (levels-1) wrap to SIZE_MAX inside the
  macro — confirm callers never pass 0.
*/
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* For palette images, posterize the colormap entries directly. */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Only channels flagged for update are posterized; black and
         alpha additionally require CMYK / blended-alpha images. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress is incremented atomically but read
           non-atomically in the SetImageProgress call — confirm this is
           acceptable (the value is advisory only). */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Finish by quantizing to at most levels^3 colors with the requested
    dither method.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  PruneChild() recursively deletes the given subtree, folding each node's
  color statistics (unique count and channel totals) into its parent so the
  pruned colors are still represented when means are later computed.

    o cube_info: the color cube; nodes counter is decremented per prune.
    o node_info: root of the subtree to prune.
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.  The root has no parent: guard
    against dereferencing NULL when the pruning threshold reaches the root
    itself (matches the upstream ImageMagick fix).
  */
  parent=node_info->parent;
  if (parent != (NodeInfo *) NULL)
    {
      parent->number_unique+=node_info->number_unique;
      parent->total_color.red+=node_info->total_color.red;
      parent->total_color.green+=node_info->total_color.green;
      parent->total_color.blue+=node_info->total_color.blue;
      parent->total_color.alpha+=node_info->total_color.alpha;
      parent->child[node_info->id]=(NodeInfo *) NULL;
      cube_info->nodes--;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Depth-first walk of the subtree; nodes sitting exactly at the cube's
    maximum depth are pruned (merged into their parents).
  */
  register ssize_t
    n;

  size_t
    number_children;

  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (n=0; n < (ssize_t) number_children; n++)
  {
    if (node_info->child[n] == (NodeInfo *) NULL)
      continue;
    PruneLevel(cube_info,node_info->child[n]);
  }
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Depth-first walk of the subtree; nodes deeper than the cube's depth
    are pruned (merged into their parents).
  */
  register ssize_t
    n;

  size_t
    number_children;

  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (n=0; n < (ssize_t) number_children; n++)
  {
    if (node_info->child[n] == (NodeInfo *) NULL)
      continue;
    PruneToCubeDepth(cube_info,node_info->child[n]);
  }
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Clamp the requested color count to [1, MaxColormapSize]; 0 means
     "use the maximum". */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /* Fast path: fully-opaque grayscale images use a dedicated reducer. */
  if (image->alpha_trait != BlendPixelTrait)
    {
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* Shallower trees suffice when dithering hides quantization error
         or alpha doubles the branching factor. */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /* Clamp the requested color count to [1, MaxColormapSize]; 0 means
     "use the maximum". */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Pass 1: classify the colors of every image in the list into one
    shared color cube (per-image progress monitors suspended).
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence, then pass 2:
        assign the shared colormap to each image.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  /*
    Pre-order walk: record this node's quantization error at the given
    offset, then append each child subtree after it.  Returns the number
    of entries written; writes past cube_info->nodes are refused.
  */
  register ssize_t
    i;

  size_t
    count,
    number_children;

  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  count=1;
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
  {
    if (node_info->child[i] == (NodeInfo *) NULL)
      continue;
    count+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+count,
      quantize_error);
  }
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Walk the subtree leaves-upward, pruning every node whose quantization
    error is at or below the current pruning threshold.  Surviving nodes
    update cube_info->colors and the next (minimum remaining) threshold.
  */
  register ssize_t
    i;

  size_t
    number_children;

  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
  {
    if (node_info->child[i] == (NodeInfo *) NULL)
      continue;
    Reduce(cube_info,node_info->child[i]);
  }
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    {
      PruneChild(cube_info,node_info);
      return;
    }
  /*
    Survivor: count it if it uniquely defines a color, and track the
    smallest remaining error as the next pruning threshold.
  */
  if (node_info->number_unique > 0)
    cube_info->colors++;
  if (node_info->quantize_error < cube_info->next_threshold)
    cube_info->next_threshold=node_info->quantize_error;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  /*
    qsort() comparator ordering quantization errors ascending.  A strictly
    greater p wins first; otherwise values within MagickEpsilon of each
    other compare equal.
  */
  const double
    p=(*(const double *) error_p),
    q=(*(const double *) error_q);

  if (p > q)
    return(1);
  if (fabs(q-p) <= MagickEpsilon)
    return(0);
  return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors: flatten the
        tree's errors into a sorted array and jump-start the threshold so
        roughly all but 110% of the target colors are pruned in one pass.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
    }
  /*
    Repeatedly prune all nodes at or below the threshold, then raise the
    threshold to the minimum surviving error, until the color count fits.
  */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    /* Sentinel strictly below the root's error so some node always wins. */
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  RemapImage() classifies the colors of the reference image into a color
  cube and then assigns the closest of those colors to each pixel of the
  target image.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign the reference image's colors to the target image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  RemapImages() builds one color cube from the reference image and remaps
  every image in the sequence to it.  When no reference image is supplied
  it falls back to computing a global colormap via QuantizeImages().
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign the reference colors to every image in the sequence,
        stopping at the first failure.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator ordering PixelInfo entries by grayscale intensity,
  ascending.

  The previous implementation cast the double intensity difference to int,
  so any two colors whose intensities differ by less than 1.0 compared as
  equal.  That made the comparator non-transitive (a==b, b==c, a<c), which
  is undefined behavior for qsort(), and silently merged distinct gray
  levels.  Compare the sign of the difference instead; this also removes
  the need for the INT_MIN/INT_MAX clamps.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity < 0.0)
    return(-1);
  if (intensity > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  SetGrayscaleImage() converts an image to a PseudoClass grayscale image:
  it builds a colormap with one entry per distinct gray level, sorts the
  colormap by intensity, collapses duplicate entries, and remaps every
  pixel index to the compacted colormap.  Returns MagickTrue on success.
*/
static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  register ssize_t
    i;

  size_t
    extent;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /*
    colormap_index maps a scaled intensity (or colormap slot) to the slot
    it should be remapped to; sized to cover both index spaces.
  */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        Build an initial colormap from the pixels, one entry per distinct
        intensity; -1 marks an intensity not yet seen.
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          /*
            The image was transformed to gray above, so the red channel
            carries the intensity.
          */
          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
              /*
                NOTE(review): double-checked pattern -- this first read is
                performed outside the critical section and re-checked
                inside; it relies on the unsynchronized ssize_t read being
                a benign race.  Confirm this matches project conventions.
              */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
                }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity, stashing each entry's original slot in
    its alpha component so the pixel indexes can be remapped afterwards.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Collapse duplicate entries and record, for each original slot (stored
    in alpha above), the compacted slot it maps to.
  */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Remap the pixel indexes to the compacted colormap.  NOTE(review): the
    stored index is passed through ScaleQuantumToMap() before the lookup --
    verify this matches how indexes were written for pre-existing
    PseudoClass images, which skip the first pass above.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColormap() traverses the color cube tree and sets the colormap of
% the image. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the SetImageColormap method is:
%
% MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageColormap() allocates the image colormap, populates it by
  traversing the color cube tree (DefineImageColormap), and trims the
  allocation down to the number of entries actually defined.
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    number_colors;

  number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors != number_colors)
    {
      /*
        Trim the unused tail of the colormap (one extra slot is kept).
      */
      image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
        image->colors+1,sizeof(*image->colormap));
      if (image->colormap == (PixelInfo *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  return(MagickTrue);
}
|
displacement_lagrangemultiplier_mixed_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierMixedContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
          class TDenseSpace >
class DisplacementLagrangeMultiplierMixedContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DisplacementLagrangeMultiplierMixedContactCriteria
    KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierMixedContactCriteria );

    /// Local Flags
    KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
    KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
    KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
    KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );

    /// The base class definition (and it subclasses)
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
    typedef typename BaseType::TDataType TDataType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace SparseSpaceType;

    /// The r_table stream definition TODO: Replace by logger
    typedef TableStreamUtility::Pointer TablePrinterPointerType;

    /// The index type definition
    typedef std::size_t IndexType;

    /// The key type definition
    typedef std::size_t KeyType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor
     * @param DispRatioTolerance Relative tolerance for displacement residual error
     * @param DispAbsTolerance Absolute tolerance for displacement residual error
     * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
     * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
     * @param EnsureContact To check if the contact is lost
     * @param pTable The pointer to the output r_table
     * @param PrintingOutput If the output is going to be printed in a txt file
     */
    explicit DisplacementLagrangeMultiplierMixedContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const TDataType LMRatioTolerance,
        const TDataType LMAbsTolerance,
        const bool EnsureContact = false,
        const bool PrintingOutput = false
        )
        : BaseType()
    {
        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

        // Store the tolerances
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;
        mLMRatioTolerance = LMRatioTolerance;
        mLMAbsTolerance = LMAbsTolerance;
    }

    /**
     * @brief Default constructor (parameters)
     * @param ThisParameters The configuration parameters
     */
    explicit DisplacementLagrangeMultiplierMixedContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
        : BaseType()
    {
        // The default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "ensure_contact" : false,
            "print_convergence_criterion" : false,
            "residual_relative_tolerance" : 1.0e-4,
            "residual_absolute_tolerance" : 1.0e-9,
            "contact_displacement_relative_tolerance" : 1.0e-4,
            "contact_displacement_absolute_tolerance" : 1.0e-9
        })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        // The displacement solution
        mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();

        // The contact solution
        mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
        mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();

        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
    }

    //* Copy constructor.
    DisplacementLagrangeMultiplierMixedContactCriteria( DisplacementLagrangeMultiplierMixedContactCriteria const& rOther )
      :BaseType(rOther)
      ,mOptions(rOther.mOptions)
      ,mDispRatioTolerance(rOther.mDispRatioTolerance)
      ,mDispAbsTolerance(rOther.mDispAbsTolerance)
      ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
      ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
      ,mLMRatioTolerance(rOther.mLMRatioTolerance)
      ,mLMAbsTolerance(rOther.mLMAbsTolerance)
    {
    }

    /// Destructor.
    ~DisplacementLagrangeMultiplierMixedContactCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Compute relative and absolute error.
     * @details The displacement error is measured on the residual (rb); the
     * Lagrange-multiplier error is measured on the solution increment (rDx)
     * relative to the current LM values.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
            // Initialize
            TDataType disp_residual_solution_norm = 0.0, lm_solution_norm = 0.0, lm_increase_norm = 0.0;
            IndexType disp_dof_num(0),lm_dof_num(0);

            // First iterator
            const auto it_dof_begin = rDofSet.begin();

            // Auxiliar values
            std::size_t dof_id = 0;
            TDataType residual_dof_value = 0.0, dof_value = 0.0, dof_incr = 0.0;

            // Loop over Dofs
            // NOTE(review): the scalar temporaries (dof_id, residual_dof_value,
            // dof_value, dof_incr) appear in the reduction clause only to make
            // them thread-local; their accumulated sums are never read.  A
            // private(...) clause would state the intent more clearly --
            // confirm before changing.
            #pragma omp parallel for reduction(+:disp_residual_solution_norm,lm_solution_norm,lm_increase_norm,disp_dof_num,lm_dof_num,dof_id,residual_dof_value,dof_value,dof_incr)
            for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
                auto it_dof = it_dof_begin + i;

                if (it_dof->IsFree()) {
                    dof_id = it_dof->EquationId();

                    const auto curr_var = it_dof->GetVariable();
                    if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                        // LM DoF: accumulate solution and increment norms
                        dof_value = it_dof->GetSolutionStepValue(0);
                        dof_incr = rDx[dof_id];
                        lm_solution_norm += dof_value * dof_value;
                        lm_increase_norm += dof_incr * dof_incr;
                        lm_dof_num++;
                    } else {
                        // Displacement DoF: accumulate residual norm
                        residual_dof_value = rb[dof_id];
                        disp_residual_solution_norm += residual_dof_value * residual_dof_value;
                        disp_dof_num++;
                    }
                }
            }

            if(lm_increase_norm == 0.0) lm_increase_norm = 1.0;
            KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

            mDispCurrentResidualNorm = disp_residual_solution_norm;
            // NOTE(review): if no LM DoF carries a nonzero value,
            // lm_solution_norm is 0 and lm_ratio becomes inf; the
            // lm_converged check below treats that case as converged when
            // ENSURE_CONTACT is not set.  lm_dof_num == 0 would make lm_abs
            // a division by zero -- confirm that cannot happen upstream.
            const TDataType lm_ratio = std::sqrt(lm_increase_norm/lm_solution_norm);
            const TDataType lm_abs = std::sqrt(lm_increase_norm)/ static_cast<TDataType>(lm_dof_num);

            TDataType residual_disp_ratio;

            // We initialize the solution
            if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
                mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
                residual_disp_ratio = 1.0;
                mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
            }

            // We calculate the ratio of the displacements
            // NOTE: on the first call this overwrites the 1.0 set above;
            // since the initial norm equals the current norm the result is
            // still 1.0, except when the current norm is exactly 0.0 (the
            // ratio then becomes 0.0).
            residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;

            // We calculate the absolute norms
            TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;

            // The process info of the model part
            ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

            // We print the results // TODO: Replace for the new log
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    std::cout.precision(4);
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
                } else {
                    std::cout.precision(4);
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("MIXED CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                    } else {
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "MIXED CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tLAGRANGE MUL: RATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                    }
                }
            }

            r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > lm_ratio) ? residual_disp_ratio : lm_ratio;
            r_process_info[RESIDUAL_NORM] = (lm_abs > mLMAbsTolerance) ? lm_abs : mLMAbsTolerance;

            // We check if converged
            const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
            const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm == 0.0) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance);

            if ( disp_converged && lm_converged ) {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FGRN("       Achieved"));
                        else
                            r_table << "Achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FGRN("achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is achieved" << std::endl;
                    }
                }
                return true;
            } else {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FRED("   Not achieved"));
                        else
                            r_table << "Not achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is not achieved" << std::endl;
                    }
                }
                return false;
            }
        } else // In this case all the displacements are imposed!
            return true;
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
     */
    void Initialize( ModelPart& rModelPart) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;

        // Register the table columns only once (guarded by the flag so
        // repeated Initialize() calls do not duplicate them)
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED)) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& r_table = p_table->GetTable();
            r_table.AddColumn("DP RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("LM RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("CONVERGENCE", 15);
            mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, true);
        }
    }

    /**
     * @brief This function initializes the solution step
     * @details Resets the flag so the first PostCriteria() call of the step
     * re-captures the reference displacement residual norm.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:

    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    Flags mOptions; /// Local flags

    TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual
    TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual

    TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual
    TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual

    TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM
    TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
    ///@{

    ///@}
}; // Kratos DisplacementLagrangeMultiplierMixedContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions for the local flags declared inside the class
// via KRATOS_DEFINE_LOCAL_FLAG.  Each flag occupies one bit position; the
// NOT_* counterpart is the same position with the value cleared.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::NOT_INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3, false));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H */
|
pomelo_fmt_plug.c | /*
* POMELO cracker patch for JtR. Hacked together during the Hash Runner 2015
* contest by Dhiru Kholia.
*/
#include "arch.h"
// Enable this format only on little-endian systems
#if ARCH_LITTLE_ENDIAN
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pomelo;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pomelo);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 512 // XXX
#endif
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "pomelo"
#define FORMAT_NAME ""
#define FORMAT_TAG "$pomelo$"
/* Parenthesized: the unparenthesized form `sizeof(FORMAT_TAG) - 1`
   mis-expands inside larger expressions (e.g. `2 * TAG_LENGTH`). */
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#ifdef __AVX2__
#define ALGORITHM_NAME "POMELO 256/256 AVX2 1x"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "POMELO 128/128 SSE2 1x"
#elif !defined(USE_GCC_ASM_IA32) && defined(USE_GCC_ASM_X64)
#define ALGORITHM_NAME "POMELO 64/64"
#else
#define ALGORITHM_NAME "POMELO 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: "$pomelo$<t_cost>$<m_cost>$<salt>$<hex digest>". */
static struct fmt_tests pomelo_tests[] = {
	{"$pomelo$2$3$hash runner 2015$8333ad83e46e425872c5545741d6da105cd31ad58926e437d32247e59b26703e", "HashRunner2014"},
	{"$pomelo$2$3$mysalt$b5bebcd9820de6a58dba52abf76aaf6eed4c5c672dbda64e69e3e3cbcc401314", "password"},
	{NULL}
};
/* Candidate plaintexts, one per key index. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Computed 256-bit digests, one per key index. */
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

/* Per-hash parameters parsed out of the ciphertext string. */
static struct custom_salt {
	unsigned char salt[64];  /* salt bytes; zeroed before filling in get_salt() */
	unsigned int saltlen;    /* number of valid bytes in salt[] */
	unsigned int t_cost;     /* POMELO time-cost parameter */
	unsigned int m_cost;     /* POMELO memory-cost parameter */
} *cur_salt;
/* One-time format setup: scale the key batch by the OpenMP thread count
 * (times OMP_SCALE to amortize threading overhead), then allocate the
 * per-key plaintext and digest buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
/* guard against double allocation if init() runs more than once */
if (!saved_key) {
saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}
}
/* Release the per-key buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
/* Syntactic check of one candidate hash line.
 * Accepted shape: $pomelo$<t_cost>$<m_cost>$<salt>$<64 hex chars>.
 * Returns 1 when well formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p = ciphertext;
char Buf[256];
if (strncmp(p, FORMAT_TAG, TAG_LENGTH))
return 0;
p += TAG_LENGTH;
/* tokenize a bounded copy: strtokm mutates its buffer */
strnzcpy(Buf, p, sizeof(Buf));
p = strtokm(Buf, "$");
if (!p || !isdec(p)) /* t_cost must be decimal */
return 0;
p = strtokm(NULL, "$");
if (!p || !isdec(p)) /* m_cost must be decimal */
return 0;
p = strtokm(NULL, "$");
if (!p || strlen(p) >= sizeof(cur_salt->salt)) /* salt must fit the 64-byte buffer */
return 0;
p = strtokm(NULL, "$");
if (!p || strlen(p) != CIPHERTEXT_LENGTH)
return 0;
/* digest must be entirely hex digits (atoi16l returns 0x7f for non-hex) */
while(*p)
if (atoi16l[ARCH_INDEX(*p++)]==0x7f)
return 0;
return 1;
}
/*
 * Parse "$pomelo$t$m$salt$hash" into a static custom_salt record.
 * The input has already passed valid(), so every '$' separator exists
 * and the salt segment fits the 64-byte buffer.
 */
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *cursor, *salt_end;

memset(&cs, 0, sizeof(cs));
cursor = ciphertext + TAG_LENGTH;
cs.t_cost = atoi(cursor);
cursor = strchr(cursor, '$') + 1;
cs.m_cost = atoi(cursor);
cursor = strchr(cursor, '$') + 1;
salt_end = strchr(cursor, '$');
cs.saltlen = salt_end - cursor;
/* exact-length copy; salt[] is deliberately not NUL-terminated */
memcpy(cs.salt, cursor, cs.saltlen);
return (void *)&cs;
}
/*
 * Decode the trailing 64-character hex digest into a static
 * BINARY_SIZE-byte buffer (allocated once, reused across calls).
 */
static void *get_binary(char *ciphertext)
{
static unsigned char *out;
char *hex = strrchr(ciphertext, '$') + 1;
int pos;

if (!out)
out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
memset(out, 0, BINARY_SIZE);
for (pos = 0; pos < BINARY_SIZE; pos++, hex += 2)
out[pos] = (atoi16[ARCH_INDEX(hex[0])] << 4) | atoi16[ARCH_INDEX(hex[1])];
return out;
}
/* Partial-hash accessors: expose the low bits of the first digest word
 * so the cracker core can bucket candidates cheaply. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Install the salt chosen by the cracker core for the next crypt_all(). */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* Reference POMELO entry point, implemented in the bundled PHC sources. */
int PHS_pomelo(void *out, size_t outlen, const void *in, size_t inlen, const void *salt, size_t saltlen, unsigned int t_cost, unsigned int m_cost);
/* Hash all queued candidate keys against cur_salt.
 * NOTE: without OpenMP the for-header is compiled out and the braced
 * body executes exactly once with index == 0; that is safe only because
 * MAX_KEYS_PER_CRYPT stays 1 in non-OpenMP builds. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
PHS_pomelo((unsigned char *)crypt_out[index], 32, saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, cur_salt->t_cost, cur_salt->m_cost);
}
return count;
}
/* Quick rejection pass: compare only the first machine word of each
 * computed digest against the target; cmp_one()/cmp_exact() confirm
 * full matches.
 * Fix: the loop header was previously wrapped in #ifdef _OPENMP, so
 * non-OpenMP builds silently compared only index 0. That is harmless
 * today (MAX_KEYS_PER_CRYPT is 1 without OpenMP) but fragile; the loop
 * now runs unconditionally. */
static int cmp_all(void *binary, int count)
{
int index;

for (index = 0; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing further to verify: the stored binary is the complete digest. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store (and NUL-terminate) one candidate key, truncating to PLAINTEXT_LENGTH. */
static void pomelo_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate key (for UI/logging). */
static char *get_key(int index)
{
return saved_key[index];
}
/* Format descriptor registered with the JtR core.
 * Field order is fixed by struct fmt_main: fmt_params first, then
 * fmt_methods (see formats.h). */
struct fmt_main fmt_pomelo = {
{ /* fmt_params */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
pomelo_tests
}, { /* fmt_methods */
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
pomelo_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* ARCH_LITTLE_ENDIAN */
|
fci_4pdm.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <assert.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "fci.h"
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define BLK 48
#define BUFBASE 96
double FCI_t1ci_sf(double *ci0, double *t1, int bcount,
int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb);
/*
 * t2[:,i,j,k,l] = E^i_j E^k_l|ci0>
 *
 * Beta part: first build t1 rows for alpha string stra_id over ALL nb
 * beta strings, then for each of the bcount target beta strings zero
 * its t2 slice and scatter-add signed t1 rows through the beta
 * excitation (linked-index) table.
 *
 * NOTE: this is the only routine that initializes t2 (memset below);
 * rdm4_a_t2 accumulates on top of it, so FCI_t2ci_sf must call this
 * one first.
 *
 * NOTE(review): nnorb/n4 are const locals referenced inside the
 * default(none) region; they are predetermined shared under OpenMP 3.x,
 * but newer OpenMP versions may require listing them — confirm with the
 * target compilers.
 */
static void rdm4_0b_t2(double *ci0, double *t2,
int bcount, int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb)
{
const int nnorb = norb * norb;
const int n4 = nnorb * nnorb;
int i, j, k, l, a, sign, str1;
/* scratch: one nnorb-long row per beta string */
double *t1 = malloc(sizeof(double) * nb * nnorb);
double *pt1, *pt2;
_LinkT *tab;
// form t1 which has beta^+ beta |t1> => target stra_id
FCI_t1ci_sf(ci0, t1, nb, stra_id, 0,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
#pragma omp parallel default(none) \
shared(t1, t2, bcount, strb_id, norb, nlinkb, clink_indexb), \
private(i, j, k, l, a, str1, sign, pt1, pt2, tab)
{
#pragma omp for schedule(static, 1) nowait
for (k = 0; k < bcount; k++) {
memset(t2+k*n4, 0, sizeof(double)*n4); /* t2 is initialized here */
tab = clink_indexb + (strb_id+k) * nlinkb;
for (j = 0; j < nlinkb; j++) {
/* decode one single excitation acting on beta string strb_id+k */
a = EXTRACT_CRE (tab[j]);
i = EXTRACT_DES (tab[j]);
str1 = EXTRACT_ADDR(tab[j]);
sign = EXTRACT_SIGN(tab[j]);
pt1 = t1 + str1 * nnorb;
pt2 = t2 + k * n4 + (i*norb+a)*nnorb;
if (sign > 0) {
for (l = 0; l < nnorb; l++) {
pt2[l] += pt1[l];
}
} else {
for (l = 0; l < nnorb; l++) {
pt2[l] -= pt1[l];
}
}
}
}
}
free(t1);
}
/*
 * t2[:,i,j,k,l] = E^i_j E^k_l|ci0>
 *
 * Alpha part: walks the excitation table of alpha string stra_id and,
 * for each connected string str1, rebuilds the t1 block for that string
 * and accumulates signed copies into t2. Accumulates only — assumes
 * rdm4_0b_t2 already zero-initialized t2.
 * Each thread allocates its own t1 scratch inside the parallel region.
 */
static void rdm4_a_t2(double *ci0, double *t2,
int bcount, int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb)
{
const int nnorb = norb * norb;
const int n4 = nnorb * nnorb;
int i, j, k, l, a, sign, str1;
double *pt1, *pt2;
_LinkT *tab = clink_indexa + stra_id * nlinka;
#pragma omp parallel default(none) \
shared(ci0, t2, bcount, strb_id, norb, na, nb, nlinka, nlinkb, \
clink_indexa, clink_indexb, tab), \
private(i, j, k, l, a, str1, sign, pt1, pt2)
{
/* per-thread scratch block (bcount rows) */
double *t1 = malloc(sizeof(double) * bcount * nnorb);
#pragma omp for schedule(static, 40)
for (j = 0; j < nlinka; j++) {
a = EXTRACT_CRE (tab[j]);
i = EXTRACT_DES (tab[j]);
str1 = EXTRACT_ADDR(tab[j]);
sign = EXTRACT_SIGN(tab[j]);
// form t1 which has alpha^+ alpha |t1> => target stra_id (through str1)
FCI_t1ci_sf(ci0, t1, bcount, str1, strb_id,
norb, na, nb, nlinka, nlinkb,
clink_indexa, clink_indexb);
for (k = 0; k < bcount; k++) {
pt1 = t1 + k * nnorb;
pt2 = t2 + k * n4 + (i*norb+a)*nnorb;
if (sign > 0) {
for (l = 0; l < nnorb; l++) {
pt2[l] += pt1[l];
}
} else {
for (l = 0; l < nnorb; l++) {
pt2[l] -= pt1[l];
}
}
}
}
free(t1);
}
}
/*
 * t2[:,i,j,k,l] = E^i_j E^k_l|ci0> for bcount beta strings starting at
 * (stra_id, strb_id).
 * Call order is significant: rdm4_0b_t2 zero-initializes t2 while
 * accumulating the beta part; rdm4_a_t2 then adds the alpha part.
 */
void FCI_t2ci_sf(double *ci0, double *t2, int bcount,
int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb)
{
rdm4_0b_t2(ci0, t2, bcount, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
rdm4_a_t2 (ci0, t2, bcount, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
}
/*
 * Accumulate rdm3 += t2ket . tbra^T, restricted to the "upper triangle"
 * of the leading composite indices allowed by particle permutation
 * symmetry (F-order [k,j,i] with k <= i <= j). The tiled dgemm calls
 * below are equivalent to the single commented-out dgemm, but skip the
 * strictly-lower part.
 */
static void tril3pdm_particle_symm(double *rdm3, double *tbra, double *t2ket,
int bcount, int ncre, int norb)
{
/* tiling assumes one norb-wide panel fits in a BLK-column tile */
assert(norb <= BLK);
const char TRANS_N = 'N';
const char TRANS_T = 'T';
const double D1 = 1;
int nnorb = norb * norb;
int n4 = nnorb * nnorb;
int i, j, k, m, n, blk1;
int iblk = MIN(BLK/norb, norb);
int blk = iblk * norb;
//dgemm_(&TRANS_N, &TRANS_T, &n4, &nncre, &bcount,
// &D1, t2ket, &n4, tbra, &nnorb, &D1, rdm3, &n4);
// "upper triangle" F-array[k,j,i], k<=i<=j
for (j = 0; j < ncre; j++) {
for (n = 0; n < norb; n++) {
/* full iblk-wide tiles below the diagonal */
for (k = 0; k < j+1-iblk; k+=iblk) {
m = k * norb;
i = m + blk;
dgemm_(&TRANS_N, &TRANS_T, &i, &blk, &bcount,
&D1, t2ket, &n4, tbra+m, &nnorb,
&D1, rdm3+m*n4, &n4);
}
/* remainder tile ending at the diagonal (k <= j) */
m = k * norb;
i = (j+1) * norb;
blk1 = i - m;
dgemm_(&TRANS_N, &TRANS_T, &i, &blk1, &bcount,
&D1, t2ket, &n4, tbra+m, &nnorb,
&D1, rdm3+m*n4, &n4);
t2ket += nnorb; /* advance to the next (j,n) row */
rdm3 += nnorb;
} }
}
/*
 * Accumulate rdm2 += tket . tbra^T for the upper-triangle part of the
 * F-ordered array only (particle permutation symmetry). The tiled dgemm
 * calls are equivalent to the single commented-out dgemm below, but
 * skip the strictly-lower part.
 */
static void tril2pdm_particle_symm(double *rdm2, double *tbra, double *tket,
int bcount, int ncre, int norb)
{
/* tiling assumes one norb-wide panel fits in a BLK-column tile */
assert(norb <= BLK);
const char TRANS_N = 'N';
const char TRANS_T = 'T';
const double D1 = 1;
int nnorb = norb * norb;
int nncre = norb * ncre;
int m, n;
int blk = MIN(BLK/norb, norb) * norb;
//dgemm_(&TRANS_N, &TRANS_T, &nncre, &nncre, &bcount,
// &D1, tket, &nnorb, tbra, &nnorb, &D1, rdm2, &nnorb);
// upper triangle part of F-array
for (m = 0; m < nncre-blk; m+=blk) {
n = m + blk;
dgemm_(&TRANS_N, &TRANS_T, &n, &blk, &bcount,
&D1, tket, &nnorb, tbra+m, &nnorb,
&D1, rdm2+m*nnorb, &nnorb);
}
/* remainder columns */
n = nncre - m;
dgemm_(&TRANS_N, &TRANS_T, &nncre, &n, &bcount,
&D1, tket, &nnorb, tbra+m, &nnorb,
&D1, rdm2+m*nnorb, &nnorb);
}
/*
 * Accumulate the rdm1/rdm2 contributions of one (stra_id, beta-block):
 *   tbra[n,(k,l)] = t1bra[n,(l,k)]   (per-string transpose)
 *   rdm2 += t1ket . tbra^T           (dgemm)
 *   rdm1 += t1ket . bra[stra_id, strb_id : strb_id+bcount]  (dgemv)
 * bra is addressed as an (na x nb) row-major CI vector.
 */
static void make_rdm12_sf(double *rdm1, double *rdm2,
double *bra, double *ket, double *t1bra, double *t1ket,
int bcount, int stra_id, int strb_id,
int norb, int na, int nb)
{
const char TRANS_N = 'N';
const char TRANS_T = 'T';
const int INC1 = 1;
const double D1 = 1;
const int nnorb = norb * norb;
int k, l;
size_t n;
double *tbra = malloc(sizeof(double) * nnorb * bcount);
double *pbra, *pt1;
for (n = 0; n < bcount; n++) {
pbra = tbra + n * nnorb;
pt1 = t1bra + n * nnorb;
for (k = 0; k < norb; k++) {
for (l = 0; l < norb; l++) {
pbra[k*norb+l] = pt1[l*norb+k]; /* transpose (l,k) -> (k,l) */
}
}
}
dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
&D1, t1ket, &nnorb, tbra, &nnorb,
&D1, rdm2, &nnorb);
dgemv_(&TRANS_N, &nnorb, &bcount, &D1, t1ket, &nnorb,
bra+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1);
free(tbra);
}
/*
 * Spin-restricted (ci[a,b] == ci[b,a]) variant of make_rdm12_sf.
 * Off-diagonal strings carry factor 2 to fold in the symmetric half;
 * the diagonal string (n+strb_id == stra_id) carries factor 1.
 * rdm1 is contracted with the factor-weighted tbra (not t1ket).
 * NOTE(review): bra is indexed with stride na here, vs. nb in
 * make_rdm12_sf — valid because na == nb in the spin0 case; confirm.
 */
static void make_rdm12_spin0(double *rdm1, double *rdm2,
double *bra, double *ket, double *t1bra, double *t1ket,
int bcount, int stra_id, int strb_id,
int norb, int na, int nb)
{
const char TRANS_N = 'N';
const char TRANS_T = 'T';
const int INC1 = 1;
const double D1 = 1;
const int nnorb = norb * norb;
int k, l;
size_t n;
double *tbra = malloc(sizeof(double) * nnorb * bcount);
double *pbra, *pt1;
double factor;
for (n = 0; n < bcount; n++) {
if (n+strb_id == stra_id) {
factor = 1; /* diagonal string: counted once */
} else {
factor = 2; /* off-diagonal: folds in the (b,a) partner */
}
pbra = tbra + n * nnorb;
pt1 = t1bra + n * nnorb;
for (k = 0; k < norb; k++) {
for (l = 0; l < norb; l++) {
pbra[k*norb+l] = pt1[l*norb+k] * factor;
}
}
}
dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
&D1, t1ket, &nnorb, tbra, &nnorb,
&D1, rdm2, &nnorb);
dgemv_(&TRANS_N, &nnorb, &bcount, &D1, tbra, &nnorb,
bra+stra_id*na+strb_id, &INC1, &D1, rdm1, &INC1);
free(tbra);
}
/*
 * Accumulate rdm1..rdm4 contributions from the block of beta strings
 * [strb_id, strb_id+bcount) of alpha string stra_id.
 * Builds t1 = E^i_j|ci> and t2 = E^i_j E^k_l|ci> once per block (bra
 * tensors are reused for the ket side when bra == ket), then each
 * OpenMP iteration ij gathers the <bra|E^j_i... rows into tbra and
 * feeds the blocked dgemm helpers. Only the particle-permutation
 * "upper triangle" is filled — the caller completes the permutations.
 */
void FCI4pdm_kern_sf(double *rdm1, double *rdm2, double *rdm3, double *rdm4,
double *bra, double *ket,
int bcount, int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb)
{
const int nnorb = norb * norb;
const int n4 = nnorb * nnorb;
const int n3 = nnorb * norb;
const size_t n6 = nnorb * nnorb * nnorb;
int i, j, k, l, ij;
size_t n;
double *tbra;
/* each allocation holds the bra half and (optionally) the ket half */
double *t1bra = malloc(sizeof(double) * nnorb * bcount * 2);
double *t2bra = malloc(sizeof(double) * n4 * bcount * 2);
double *t1ket = t1bra + nnorb * bcount;
double *t2ket = t2bra + n4 * bcount;
double *pbra, *pt2;
// t2[:,i,j,k,l] = E^i_j E^k_l|ket>
FCI_t1ci_sf(bra, t1bra, bcount, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
FCI_t2ci_sf(bra, t2bra, bcount, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
if (bra == ket) {
t1ket = t1bra; /* alias: skip recomputing the ket tensors */
t2ket = t2bra;
} else {
FCI_t1ci_sf(ket, t1ket, bcount, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
FCI_t2ci_sf(ket, t2ket, bcount, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
}
#pragma omp parallel default(none) \
shared(rdm3, rdm4, t1ket, t2bra, t2ket, norb, bcount), \
private(ij, i, j, k, l, n, tbra, pbra, pt2)
{
/* per-thread scratch for the gathered bra rows */
tbra = malloc(sizeof(double) * nnorb * bcount);
#pragma omp for schedule(static, 1) nowait
for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k)
for (n = 0; n < bcount; n++) {
for (k = 0; k < norb; k++) {
pbra = tbra + n * nnorb + k*norb;
pt2 = t2bra + n * n4 + k*nnorb + ij;
for (l = 0; l < norb; l++) {
pbra[l] = pt2[l*n3]; /* strided gather of column ij */
}
}
}
i = ij / norb;
j = ij - i * norb;
// contract <bra-of-Eij| with |E^k_l E^m_n ket>
tril3pdm_particle_symm(rdm4+(j*norb+i)*n6, tbra, t2ket,
bcount, j+1, norb);
// rdm3
tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket,
bcount, j+1, norb);
}
free(tbra);
}
make_rdm12_sf(rdm1, rdm2, bra, ket, t1bra, t1ket,
bcount, stra_id, strb_id, norb, na, nb);
free(t1bra);
free(t2bra);
}
/*
 * use symmetry ci0[a,b] == ci0[b,a], t2[a,b,...] == t2[b,a,...]
 *
 * Spin-restricted variant of FCI4pdm_kern_sf: only beta strings with
 * strb <= stra carry independent data, so fill1 clips the block at the
 * diagonal, and blocks strictly above it are skipped entirely.
 * Off-diagonal strings are weighted by factor 2 (diagonal by 1) to
 * fold in the symmetric half; only k <= j columns of tbra are gathered
 * since the dgemm helpers read no further.
 */
void FCI4pdm_kern_spin0(double *rdm1, double *rdm2, double *rdm3, double *rdm4,
double *bra, double *ket,
int bcount, int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb)
{
int fill1;
if (strb_id+bcount <= stra_id) {
fill1 = bcount; /* block entirely below the diagonal */
} else if (stra_id >= strb_id) {
fill1 = stra_id - strb_id + 1; /* clip at the diagonal */
} else {
return; /* block above the diagonal: nothing independent */
}
const int nnorb = norb * norb;
const int n4 = nnorb * nnorb;
const int n3 = nnorb * norb;
const size_t n6 = nnorb * nnorb * nnorb;
int i, j, k, l, ij;
size_t n;
double factor;
double *tbra;
/* each allocation holds the bra half and (optionally) the ket half */
double *t1bra = malloc(sizeof(double) * nnorb * fill1 * 2);
double *t2bra = malloc(sizeof(double) * n4 * fill1 * 2);
double *t1ket = t1bra + nnorb * fill1;
double *t2ket = t2bra + n4 * fill1;
double *pbra, *pt2;
FCI_t1ci_sf(bra, t1bra, fill1, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
FCI_t2ci_sf(bra, t2bra, fill1, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
if (bra == ket) {
t1ket = t1bra; /* alias: skip recomputing the ket tensors */
t2ket = t2bra;
} else {
FCI_t1ci_sf(ket, t1ket, fill1, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
FCI_t2ci_sf(ket, t2ket, fill1, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
}
#pragma omp parallel default(none) \
shared(rdm3, rdm4, t1ket, t2bra, t2ket, norb, stra_id, strb_id, fill1), \
private(ij, i, j, k, l, n, tbra, pbra, pt2, factor)
{
/* per-thread scratch for the gathered, factor-weighted bra rows */
tbra = malloc(sizeof(double) * nnorb * fill1);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k)
i = ij / norb;
j = ij - i * norb;
for (n = 0; n < fill1; n++) {
if (n+strb_id == stra_id) {
factor = 1; /* diagonal string */
} else {
factor = 2; /* off-diagonal: folds in the (b,a) partner */
}
for (k = 0; k <= j; k++) {
pbra = tbra + n * nnorb + k*norb;
pt2 = t2bra + n * n4 + k*nnorb + ij;
for (l = 0; l < norb; l++) {
pbra[l] = pt2[l*n3] * factor;
}
}
}
// contract <bra-of-Eij| with |E^k_l E^m_n ket>
tril3pdm_particle_symm(rdm4+(j*norb+i)*n6, tbra, t2ket,
fill1, j+1, norb);
// rdm3
tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket,
fill1, j+1, norb);
}
free(tbra);
}
make_rdm12_spin0(rdm1, rdm2, bra, ket, t1bra, t1ket,
fill1, stra_id, strb_id, norb, na, nb);
free(t1bra);
free(t2bra);
}
/*
 * Driver for the incomplete (particle-permutation-symmetric) rdm3/rdm4
 * accumulation: zeroes the output tensors, compresses the link tables,
 * and sweeps every alpha string over beta strings in BUFBASE-sized
 * blocks, handing each block to the kernel.
 * kernel can be FCI4pdm_kern_sf or FCI4pdm_kern_spin0.
 */
void FCIrdm4_drv(void (*kernel)(),
double *rdm1, double *rdm2, double *rdm3, double *rdm4,
double *bra, double *ket,
int norb, int na, int nb, int nlinka, int nlinkb,
int *link_indexa, int *link_indexb)
{
const size_t nnorb = norb * norb;
const size_t n4 = nnorb * nnorb;
int stra, ib0, nblk;
_LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na);
_LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb);

FCIcompress_link(clinka, link_indexa, norb, na, nlinka);
FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb);

/* zero all accumulators up front; kernels only add */
memset(rdm1, 0, sizeof(double) * nnorb);
memset(rdm2, 0, sizeof(double) * n4);
memset(rdm3, 0, sizeof(double) * n4 * nnorb);
memset(rdm4, 0, sizeof(double) * n4 * n4);

for (stra = 0; stra < na; stra++) {
for (ib0 = 0; ib0 < nb; ib0 += BUFBASE) {
nblk = MIN(BUFBASE, nb - ib0);
(*kernel)(rdm1, rdm2, rdm3, rdm4,
bra, ket, nblk, stra, ib0,
norb, na, nb, nlinka, nlinkb, clinka, clinkb);
}
}
free(clinka);
free(clinkb);
}
/*
 * Accumulate rdm1..rdm3 contributions from the block of beta strings
 * [strb_id, strb_id+bcount) of alpha string stra_id. Same gather-and-
 * dgemm scheme as FCI4pdm_kern_sf but without the rdm4 contraction,
 * and with separate bra/ket t1 buffers (no aliasing shortcut).
 */
void FCI3pdm_kern_sf(double *rdm1, double *rdm2, double *rdm3,
double *bra, double *ket,
int bcount, int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb)
{
const int nnorb = norb * norb;
const int n4 = nnorb * nnorb;
const int n3 = nnorb * norb;
int i, j, k, l, ij;
size_t n;
double *tbra;
double *t1bra = malloc(sizeof(double) * nnorb * bcount);
double *t1ket = malloc(sizeof(double) * nnorb * bcount);
double *t2bra = malloc(sizeof(double) * n4 * bcount);
double *pbra, *pt2;
// t2[:,i,j,k,l] = E^i_j E^k_l|ket>
FCI_t1ci_sf(bra, t1bra, bcount, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
FCI_t2ci_sf(bra, t2bra, bcount, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
FCI_t1ci_sf(ket, t1ket, bcount, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
#pragma omp parallel default(none) \
shared(rdm3, t1ket, t2bra, norb, bcount), \
private(ij, i, j, k, l, n, tbra, pbra, pt2)
{
/* per-thread scratch for the gathered bra rows */
tbra = malloc(sizeof(double) * nnorb * bcount);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k)
for (n = 0; n < bcount; n++) {
pbra = tbra + n * nnorb;
pt2 = t2bra + n * n4 + ij;
for (k = 0; k < norb; k++) {
for (l = 0; l < norb; l++) {
pbra[k*norb+l] = pt2[l*n3+k*nnorb]; /* strided gather of column ij */
}
}
}
i = ij / norb;
j = ij - i * norb;
tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket,
bcount, j+1, norb);
}
free(tbra);
}
make_rdm12_sf(rdm1, rdm2, bra, ket, t1bra, t1ket,
bcount, stra_id, strb_id, norb, na, nb);
free(t1bra);
free(t1ket);
free(t2bra);
}
/*
 * use symmetry ci0[a,b] == ci0[b,a], t2[a,b,...] == t2[b,a,...]
 *
 * Spin-restricted variant of FCI3pdm_kern_sf: only beta strings with
 * strb <= stra carry independent data, so fill1 clips the block at the
 * diagonal and blocks strictly above it are skipped. Off-diagonal
 * strings are weighted by factor 2 (diagonal by 1); only k <= j columns
 * of tbra are gathered since the dgemm helper reads no further.
 */
void FCI3pdm_kern_spin0(double *rdm1, double *rdm2, double *rdm3,
double *bra, double *ket,
int bcount, int stra_id, int strb_id,
int norb, int na, int nb, int nlinka, int nlinkb,
_LinkT *clink_indexa, _LinkT *clink_indexb)
{
int fill1;
if (strb_id+bcount <= stra_id) {
fill1 = bcount; /* block entirely below the diagonal */
} else if (stra_id >= strb_id) {
fill1 = stra_id - strb_id + 1; /* clip at the diagonal */
} else {
return; /* block above the diagonal: nothing independent */
}
const int nnorb = norb * norb;
const int n4 = nnorb * nnorb;
const int n3 = nnorb * norb;
int i, j, k, l, ij;
size_t n;
double factor;
double *tbra;
double *t1bra = malloc(sizeof(double) * nnorb * fill1);
double *t1ket = malloc(sizeof(double) * nnorb * fill1);
double *t2bra = malloc(sizeof(double) * n4 * fill1);
double *pbra, *pt2;
// t2[:,i,j,k,l] = E^i_j E^k_l|ket>
FCI_t2ci_sf(bra, t2bra, fill1, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
FCI_t1ci_sf(bra, t1bra, fill1, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
FCI_t1ci_sf(ket, t1ket, fill1, stra_id, strb_id,
norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
#pragma omp parallel default(none) \
shared(rdm3, t1ket, t2bra, norb, stra_id, strb_id, fill1), \
private(ij, i, j, k, l, n, tbra, pbra, pt2, factor)
{
/* per-thread scratch for the gathered, factor-weighted bra rows */
tbra = malloc(sizeof(double) * nnorb * fill1);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k)
i = ij / norb;
j = ij - i * norb;
for (n = 0; n < fill1; n++) {
if (n+strb_id == stra_id) {
factor = 1; /* diagonal string */
} else {
factor = 2; /* off-diagonal: folds in the (b,a) partner */
}
for (k = 0; k <= j; k++) {
pbra = tbra + n * nnorb + k*norb;
pt2 = t2bra + n * n4 + k*nnorb + ij;
for (l = 0; l < norb; l++) {
pbra[l] = pt2[l*n3] * factor;
}
}
}
tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket,
fill1, j+1, norb);
}
free(tbra);
}
make_rdm12_spin0(rdm1, rdm2, bra, ket, t1bra, t1ket,
fill1, stra_id, strb_id, norb, na, nb);
free(t1bra);
free(t1ket);
free(t2bra);
}
/*
 * Driver for the incomplete (particle-permutation-symmetric) rdm3
 * accumulation: zeroes the outputs, compresses the link tables, then
 * sweeps every alpha string over beta strings in BUFBASE-sized blocks.
 * kernel can be FCI3pdm_kern_sf or FCI3pdm_kern_spin0.
 */
void FCIrdm3_drv(void (*kernel)(),
double *rdm1, double *rdm2, double *rdm3,
double *bra, double *ket,
int norb, int na, int nb, int nlinka, int nlinkb,
int *link_indexa, int *link_indexb)
{
const size_t nnorb = norb * norb;
const size_t n4 = nnorb * nnorb;
int stra, ib0, nblk;
_LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na);
_LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb);

FCIcompress_link(clinka, link_indexa, norb, na, nlinka);
FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb);

/* zero all accumulators up front; kernels only add */
memset(rdm1, 0, sizeof(double) * nnorb);
memset(rdm2, 0, sizeof(double) * n4);
memset(rdm3, 0, sizeof(double) * n4 * nnorb);

for (stra = 0; stra < na; stra++) {
for (ib0 = 0; ib0 < nb; ib0 += BUFBASE) {
nblk = MIN(BUFBASE, nb - ib0);
(*kernel)(rdm1, rdm2, rdm3,
bra, ket, nblk, stra, ib0,
norb, na, nb, nlinka, nlinkb, clinka, clinkb);
}
}
free(clinka);
free(clinkb);
}
|
Forces_PS.h | /*
* force_ps.h
*
* Created on: Sep 20, 2016
* Author: isivkov
*/
#ifndef SRC_FORCES_PS_H_
#define SRC_FORCES_PS_H_
#include "../simulation_context.h"
#include "../periodic_function.h"
#include "../augmentation_operator.h"
#include "../Beta_projectors/beta_projectors.h"
#include "../Beta_projectors/beta_projectors_gradient.h"
#include "../potential.h"
#include "../density.h"
namespace sirius
{
class Forces_PS
{
private:
Simulation_context &ctx_;
Density &density_;
Potential &potential_;
K_point_set& kset_;
mdarray<double,2> local_forces_;
mdarray<double,2> ultrasoft_forces_;
mdarray<double,2> nonlocal_forces_;
mdarray<double,2> nlcc_forces_;
mdarray<double,2> ewald_forces_;
template<typename T>
void add_k_point_contribution_to_nonlocal2(K_point& kpoint, mdarray<double,2>& forces)
{
Unit_cell &unit_cell = ctx_.unit_cell();
Beta_projectors &bp = kpoint.beta_projectors();
Beta_projectors_gradient bp_grad(&bp);
// from formula
double main_two_factor = -2.0;
#ifdef __GPU
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++)
{
if( bp.proc_unit() == GPU )
{
int nbnd = kpoint.num_occupied_bands(ispn);
kpoint.spinor_wave_functions(ispn).allocate_on_device();
kpoint.spinor_wave_functions(ispn).copy_to_device(0, nbnd);
}
}
#endif
bp_grad.prepare();
bp.prepare();
for (int icnk = 0; icnk < bp.num_beta_chunks(); icnk++)
{
// generate chunk for inner product of beta gradient
bp_grad.generate(icnk);
// generate chunk for inner product of beta
bp.generate(icnk);
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++)
{
/* total number of occupied bands for this spin */
int nbnd = kpoint.num_occupied_bands(ispn);
// inner product of beta gradient and WF
bp_grad.inner<T>(icnk, kpoint.spinor_wave_functions(ispn), 0, nbnd);
// get inner product
std::array<matrix<T>, 3> bp_grad_phi_chunk = bp_grad.beta_phi<T>(icnk, nbnd);
// inner product of beta and WF
bp.inner<T>(icnk, kpoint.spinor_wave_functions(ispn), 0, nbnd);
// get inner product
matrix<T> bp_phi_chunk = bp.beta_phi<T>(icnk, nbnd);
splindex<block> spl_nbnd(nbnd, kpoint.comm().size(), kpoint.comm().rank());
int nbnd_loc = spl_nbnd.local_size();
int bnd_offset = spl_nbnd.global_offset();
#pragma omp parallel for
for(int ia_chunk = 0; ia_chunk < bp.beta_chunk(icnk).num_atoms_; ia_chunk++)
{
int ia = bp.beta_chunk(icnk).desc_(3, ia_chunk);
int offs = bp.beta_chunk(icnk).desc_(1, ia_chunk);
int nbf = bp.beta_chunk(icnk).desc_(0, ia_chunk);
int iat = unit_cell.atom(ia).type_id();
// linalg<CPU>::gemm(0, 0, nbf, n__, nbf,
// op_.at<CPU>(packed_mtrx_offset_(ia), ispn__), nbf,
// beta_phi.at<CPU>(offs, 0), nbeta,
// work_.at<CPU>(offs), nbeta);
// mpi
// TODO make in smart way with matrix multiplication
for (int ibnd_loc = 0; ibnd_loc < nbnd_loc; ibnd_loc++)
{
int ibnd = spl_nbnd[ibnd_loc];
auto D_aug_mtrx = [&](int i, int j)
{
if (unit_cell.atom(ia).type().pp_desc().augment) {
return unit_cell.atom(ia).d_mtrx(i, j, ispn) - kpoint.band_energy(ibnd) *
ctx_.augmentation_op(iat).q_mtrx(i, j);
} else {
return unit_cell.atom(ia).d_mtrx(i, j, ispn) - kpoint.band_energy(ibnd);
}
};
for(int ibf = 0; ibf < unit_cell.atom(ia).type().mt_lo_basis_size(); ibf++ )
{
for(int jbf = 0; jbf < unit_cell.atom(ia).type().mt_lo_basis_size(); jbf++ )
{
// calc scalar part of the forces
double_complex scalar_part = main_two_factor *
kpoint.band_occupancy(ibnd + ispn * ctx_.num_fv_states()) * kpoint.weight() *
D_aug_mtrx(ibf, jbf) *
std::conj(bp_phi_chunk(offs + jbf, ibnd));
// multiply scalar part by gradient components
for(int comp: {0,1,2}) forces(comp,ia) += (scalar_part * bp_grad_phi_chunk[comp](offs + ibf, ibnd)).real();
}
}
}
}
}
}
bp.dismiss();
bp_grad.dismiss();
#ifdef __GPU
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++)
{
if( bp.proc_unit() == GPU )
{
kpoint.spinor_wave_functions(ispn).deallocate_on_device();
}
}
#endif
}
//---------------------------------------------------------------
//---------------------------------------------------------------
template<typename T>
void add_k_point_contribution_to_nonlocal(K_point& kpoint, mdarray<double,2>& forces)
{
Unit_cell &unit_cell = ctx_.unit_cell();
Beta_projectors &bp = kpoint.beta_projectors();
Beta_projectors_gradient bp_grad(&bp);
// from formula
double main_two_factor = -2.0;
for (int icnk = 0; icnk < bp.num_beta_chunks(); icnk++)
{
// generate chunk for inner product of beta gradient
bp_grad.generate(icnk);
// generate chunk for inner product of beta
bp.generate(icnk);
for (int ispn = 0; ispn < ctx_.num_spins(); ispn++)
{
/* total number of occupied bands for this spin */
int nbnd = kpoint.num_occupied_bands(ispn);
splindex<block> spl_nbnd(nbnd, kpoint.comm().size(), kpoint.comm().rank());
int nbnd_loc = spl_nbnd.local_size();
int bnd_offset = spl_nbnd.global_offset();
printf("rank: %d nbnd: %d nbnd_loc: %d bnd_offset: %d wf_size: %d %d beta_gk_size: %d %d\n",
ctx_.comm().rank(),
nbnd,
nbnd_loc,
bnd_offset,
kpoint.spinor_wave_functions(ispn).pw_coeffs().prime().size(0),
kpoint.spinor_wave_functions(ispn).pw_coeffs().prime().size(1),
bp.beta_gk().size(0),
bp.beta_gk().size(1));
printf("kp vec: %f %f %f \n", kpoint.vk()[0],kpoint.vk()[1], kpoint.vk()[2]);
printf("nl1\n");
// inner product of beta and WF
bp.inner<T>(icnk, kpoint.spinor_wave_functions(ispn), bnd_offset, nbnd_loc);
printf("nl2\n");
// get inner product
matrix<T> bp_phi_chunk = bp.beta_phi<T>(icnk, nbnd_loc);
printf("nl3\n");
// inner product of beta gradient and WF
bp_grad.inner<T>(icnk, kpoint.spinor_wave_functions(ispn), bnd_offset, nbnd_loc);
printf("nl0\n");
// get inner product
std::array<matrix<T>, 3> bp_grad_phi_chunk = bp_grad.beta_phi<T>(icnk, nbnd_loc);
#pragma omp parallel for
for(int ia_chunk = 0; ia_chunk < bp.beta_chunk(icnk).num_atoms_; ia_chunk++)
{
int ia = bp.beta_chunk(icnk).desc_(3, ia_chunk);
int offs = bp.beta_chunk(icnk).desc_(1, ia_chunk);
int iat = unit_cell.atom(ia).type_id();
// mpi
// TODO make in smart way with matrix multiplication
for (int ibnd_loc = 0; ibnd_loc < nbnd_loc; ibnd_loc++)
{
int ibnd = spl_nbnd[ibnd_loc];
auto D_aug_mtrx = [&](int i, int j)
{
if (unit_cell.atom(ia).type().pp_desc().augment) {
return unit_cell.atom(ia).d_mtrx(i, j, ispn) - kpoint.band_energy(ibnd) *
ctx_.augmentation_op(iat).q_mtrx(i, j);
} else {
return unit_cell.atom(ia).d_mtrx(i, j, ispn) - kpoint.band_energy(ibnd);
}
};
for(int ibf = 0; ibf < unit_cell.atom(ia).type().mt_lo_basis_size(); ibf++ )
{
for(int jbf = 0; jbf < unit_cell.atom(ia).type().mt_lo_basis_size(); jbf++ )
{
// calc scalar part of the forces
double_complex scalar_part = main_two_factor *
kpoint.band_occupancy(ibnd + ispn * ctx_.num_fv_states()) * kpoint.weight() *
D_aug_mtrx(ibf, jbf) *
std::conj(bp_phi_chunk(offs + jbf, ibnd_loc));
// multiply scalar part by gradient components
for(int comp: {0,1,2}) forces(comp,ia) += (scalar_part * bp_grad_phi_chunk[comp](offs + ibf, ibnd_loc)).real();
}
}
}
}
}
}
}
void symmetrize_forces(mdarray<double,2>& unsym_forces, mdarray<double,2>& sym_forces );
public:
Forces_PS(Simulation_context &ctx__,
Density& density__,
Potential& potential__,
K_point_set& kset__)
: ctx_(ctx__)
, density_(density__)
, potential_(potential__)
, kset_(kset__)
{
local_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
ultrasoft_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
nonlocal_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
nlcc_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
ewald_forces_ = mdarray<double,2>(3, ctx_.unit_cell().num_atoms());
}
void calc_local_forces(mdarray<double,2>& forces);
void calc_ultrasoft_forces(mdarray<double,2>& forces);
void calc_nonlocal_forces(mdarray<double,2>& forces);
void calc_nlcc_forces(mdarray<double,2>& forces);
void calc_ewald_forces(mdarray<double,2>& forces);
void calc_forces_contributions();
mdarray<double,2> const& local_forces()
{
return local_forces_;
}
mdarray<double,2> const& ultrasoft_forces()
{
return ultrasoft_forces_;
}
mdarray<double,2> const& nonlocal_forces()
{
return nonlocal_forces_;
}
mdarray<double,2> const& nlcc_forces()
{
return nlcc_forces_;
}
mdarray<double,2> const& ewald_forces()
{
return ewald_forces_;
}
mdarray<double,2> sum_forces();
void sum_forces(mdarray<double,2>& inout_total_forces);
};
}
#endif /* SRC_FORCES_PS_H_ */
|
rose_slowInput.c | #include "omp.h"
typedef double real8;
/************************************************************************
 * Function : StressZero
 *
 * Purpose : Zero the deviatoric stress components of zones that can no
 * longer support shear: zero shear modulus, stress magnitude
 * fun2j below the stresscut threshold, or relative volume vc at
 * or above eosvmax. Afterwards flush denormal-scale components
 * (|s| < stresscut * 1e-20) to exactly zero.
 *
 * Fix : the original stored `index = zoneset[i]` but never used it
 * (the condition re-read zoneset[i]); the cached index is now
 * actually used and the redundant load removed.
 ************************************************************************/
void StressZero(real8 *newSxx,real8 *newSyy,real8 *newSzz,real8 *newTxy,real8 *newTxz,real8 *newTyz,const real8 *fun2j,const real8 *shearMod,real8 eosvmax,real8 stresscut,const int *zoneset,const real8 *vc,int length)
{
  int i;
/* This value 1.e-20 is used to prevent underflow. It is NOT a
   cuttoff. DO NOT TOUCH THIS VALE. */
  real8 stress2 = stresscut * 1.e-20;
  real8 nstres2 = -stress2;
#pragma omp parallel for private (i) firstprivate (length,stress2)
  for (i = 0; i <= length - 1; i += 1) {
    const int index = zoneset[i];
    if (shearMod[index] == 0.0 || fun2j[i] < stresscut || vc[i] >= eosvmax) {
      newSxx[i] = 0.0;
      newSyy[i] = 0.0;
      newSzz[i] = 0.0;
      newTxy[i] = 0.0;
      newTxz[i] = 0.0;
      newTyz[i] = 0.0;
    }
#if 1
    /* flush near-underflow magnitudes to exact zero */
    if (newSxx[i] < stress2 && newSxx[i] > nstres2)
      newSxx[i] = 0.;
    if (newSyy[i] < stress2 && newSyy[i] > nstres2)
      newSyy[i] = 0.;
    if (newSzz[i] < stress2 && newSzz[i] > nstres2)
      newSzz[i] = 0.;
    if (newTxy[i] < stress2 && newTxy[i] > nstres2)
      newTxy[i] = 0.;
    if (newTxz[i] < stress2 && newTxz[i] > nstres2)
      newTxz[i] = 0.;
    if (newTyz[i] < stress2 && newTyz[i] > nstres2)
      newTyz[i] = 0.;
#endif
  }
}
|
lis_precon_ads.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
/************************************************
* lis_precon_create
* lis_psolve
* lis_psolvet
************************************************/
#undef __FUNC__
#define __FUNC__ "lis_precon_create_adds"
LIS_INT lis_precon_create_adds(LIS_SOLVER solver, LIS_PRECON precon)
{
	LIS_INT		k,m;
	LIS_INT		ptype,nwork;
	LIS_INT		err;
	LIS_VECTOR	*work;

	LIS_DEBUG_FUNC_IN;

	/* Allocate the two work vectors (correction W and residual R) used by
	 * lis_psolve_adds / lis_psolvet_adds, then build the underlying
	 * preconditioner selected by the solver options. */
	ptype = solver->options[LIS_OPTIONS_PRECON];
	nwork = 2;
	work  = (LIS_VECTOR *)lis_malloc( nwork*sizeof(LIS_VECTOR),"lis_precon_create_adds::work" );
	if( work==NULL )
	{
		LIS_SETERR_MEM(nwork*sizeof(LIS_VECTOR));
		return LIS_OUT_OF_MEMORY;
	}

	/* Duplicate the system vector layout in the requested precision. */
	for(k=0;k<nwork;k++)
	{
		if( solver->precision==LIS_PRECISION_DEFAULT )
		{
			err = lis_vector_duplicate(solver->A,&work[k]);
		}
		else
		{
			err = lis_vector_duplicateex(LIS_PRECISION_QUAD,solver->A,&work[k]);
		}
		if( err ) break;
	}
	if( k<nwork )
	{
		/* Roll back the vectors created before the failure. */
		for(m=0;m<k;m++) lis_vector_destroy(work[m]);
		lis_free(work);
		return err;
	}
	precon->worklen = nwork;
	precon->work    = work;

	/* Create the wrapped preconditioner itself; on failure the work
	 * vectors now owned by precon are released by lis_precon_destroy. */
	err = lis_precon_create_xxx[ptype](solver,precon);
	if( err )
	{
		lis_precon_destroy(precon);
		return err;
	}
	precon->A       = solver->A;
	precon->is_copy = LIS_FALSE;

	LIS_DEBUG_FUNC_OUT;
	return err;
}
#undef __FUNC__
#define __FUNC__ "lis_psolve_adds"
LIS_INT lis_psolve_adds(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
	LIS_INT i,k,n,np,iter,ptype;
	LIS_SCALAR *b,*x,*w,*r,*rl;
	LIS_VECTOR W,R;
	LIS_PRECON precon;
	LIS_QUAD_DECLAR;

	LIS_DEBUG_FUNC_IN;

	/* Iterative-refinement wrapper around the preconditioner selected by
	 * ptype.  Starting from X = 0, R = B, it performs iter+1 sweeps of
	 *     W = M^{-1} R;   X += W;   R = B - A*X;
	 * where M^{-1} is applied by lis_psolve_xxx[ptype].  The residual
	 * update is skipped after the last sweep. */
	precon = solver->precon;
	n      = precon->A->n;		/* local rows                       */
	np     = precon->A->np;		/* local rows plus ghost entries    */
	W      = precon->work[0];	/* correction vector                */
	R      = precon->work[1];	/* residual vector                  */
	b      = B->value;
	x      = X->value;
	w      = W->value;
	r      = R->value;
	rl     = R->value_lo;		/* low words of R (quad precision)  */
	iter   = solver->options[LIS_OPTIONS_ADDS_ITER];
	ptype  = solver->options[LIS_OPTIONS_PRECON];

#ifdef USE_QUAD_PRECISION
	if( solver->precision==LIS_PRECISION_DEFAULT )
	{
#endif
		lis_vector_set_all(0.0,X);
		lis_vector_copy(B,R);
		for(k=0;k<iter+1;k++)
		{
			/* Clear ghost entries before applying the preconditioner. */
			for(i=n;i<np;i++)
			{
				r[i] = 0.0;
			}
			/* W = M^{-1} R */
			lis_psolve_xxx[ptype](solver,R,W);
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
			for(i=0;i<n;i++)
			{
				x[i] += w[i];
			}
			if(k!=iter)
			{
				/* R = B - A*X for the next sweep. */
				lis_matvec(precon->A,X,R);
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
				for(i=0;i<n;i++)
				{
					r[i] = b[i] - r[i];
				}
			}
		}
#ifdef USE_QUAD_PRECISION
	}
	else
	{
		/* Double-double (quad) path: identical sweep structure, but the
		 * vector updates are carried out in extended precision via the
		 * LIS_QUAD_ADD macros on the (value, value_lo) word pairs. */
		lis_vector_set_allex_nm(0.0,X);
		lis_vector_copyex_mm(B,R);
		for(k=0;k<iter+1;k++)
		{
			/* Clear both words of the ghost entries. */
			for(i=n;i<np;i++)
			{
				r[i] = 0.0;
				rl[i] = 0.0;
			}
			lis_psolve_xxx[ptype](solver,R,W);
			for(i=0;i<n;i++)
			{
#ifndef USE_SSE2
				LIS_QUAD_ADD(X->value[i],X->value_lo[i],X->value[i],X->value_lo[i],W->value[i],W->value_lo[i]);
#else
				LIS_QUAD_ADD_SSE2(X->value[i],X->value_lo[i],X->value[i],X->value_lo[i],W->value[i],W->value_lo[i]);
#endif
				/* x[i] += w[i];*/
			}
			if(k==iter) break;
			lis_matvec(precon->A,X,R);
			for(i=0;i<n;i++)
			{
#ifndef USE_SSE2
				LIS_QUAD_ADD(R->value[i],R->value_lo[i],B->value[i],B->value_lo[i],-R->value[i],-R->value_lo[i]);
#else
				LIS_QUAD_ADD_SSE2(R->value[i],R->value_lo[i],B->value[i],B->value_lo[i],-R->value[i],-R->value_lo[i]);
#endif
				/* r[i] = b[i] - r[i];*/
			}
		}
	}
#endif
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_psolvet_adds"
LIS_INT lis_psolvet_adds(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
	LIS_INT i,k,n,np,iter,ptype;
	LIS_SCALAR *b,*x,*w,*r;
	LIS_VECTOR W,R;
	LIS_PRECON precon;

	LIS_DEBUG_FUNC_IN;

	/* Transpose counterpart of lis_psolve_adds: the same iterative
	 * refinement sweeps, but using the transposed preconditioner
	 * application lis_psolvet_xxx and the transposed matrix-vector
	 * product lis_matvect. */
	precon = solver->precon;
	n      = precon->A->n;		/* local rows                    */
	np     = precon->A->np;		/* local rows plus ghost entries */
	W      = precon->work[0];	/* correction vector             */
	R      = precon->work[1];	/* residual vector               */
	b      = B->value;
	x      = X->value;
	w      = W->value;
	r      = R->value;
	iter   = solver->options[LIS_OPTIONS_ADDS_ITER];
	ptype  = solver->options[LIS_OPTIONS_PRECON];
	if( solver->precision==LIS_PRECISION_DEFAULT )
	{
		lis_vector_set_all(0.0,X);
		lis_vector_copy(B,R);
		for(k=0;k<iter+1;k++)
		{
			/* Clear ghost entries before applying the preconditioner. */
			for(i=n;i<np;i++)
			{
				r[i] = 0.0;
			}
			/* W = M^{-T} R */
			lis_psolvet_xxx[ptype](solver,R,W);
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
			for(i=0;i<n;i++)
			{
				x[i] += w[i];
			}
			if(k!=iter)
			{
				/* R = B - A^T X for the next sweep. */
				lis_matvect(precon->A,X,R);
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
				for(i=0;i<n;i++)
				{
					r[i] = b[i] - r[i];
				}
			}
		}
	}
	else
	{
		/* Quad-precision solver: note that, unlike lis_psolve_adds, the
		 * refinement updates here use only the working (double) words. */
		lis_vector_set_all(0.0,X);
		lis_vector_copy(B,R);
		for(k=0;k<iter+1;k++)
		{
			for(i=n;i<np;i++)
			{
				r[i] = 0.0;
			}
			lis_psolvet_xxx[ptype](solver,R,W);
			for(i=0;i<n;i++)
			{
				x[i] += w[i];
			}
			if(k==iter) break;
			/* Temporarily mark X as double precision around the
			 * transposed matvec — presumably because lis_matvect has
			 * no quad-precision kernel; TODO confirm. */
			X->precision = LIS_PRECISION_DEFAULT;
			lis_matvect(precon->A,X,R);
			X->precision = LIS_PRECISION_QUAD;
			for(i=0;i<n;i++)
			{
				r[i] = b[i] - r[i];
			}
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
|
right_synch_p2p_dataflow.c | /*
* This file is part of a small series of tutorial,
* which aims to demonstrate key features of the GASPI
* standard by means of small but expandable examples.
* Conceptually the tutorial follows a MPI course
* developed by EPCC and HLRS.
*
* Contact point for the MPI tutorial:
* rabenseifner@hlrs.de
* Contact point for the GASPI tutorial:
* daniel.gruenewald@itwm.fraunhofer.de
* mirko.rahn@itwm.fraunhofer.de
* christian.simmendinger@t-systems.com
*/
#include "assert.h"
#include "constant.h"
#include "data.h"
#include "topology.h"
#include "now.h"
#include "mm_pause.h"
#include "success_or_die.h"
#include "queue.h"
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* global stage counters for comp */
static volatile counter_t *compStage = NULL;
#define MIN(x,y) ((x)<(y)?(x):(y))
/*
 * Thread-pipelined right-moving halo exchange: each thread computes its slab
 * for stage k, then notifies the right neighbour rank via gaspi_write_notify.
 * Relies on project helpers (LEFT/RIGHT, data_*, array_OFFSET, now,
 * sort_median, wait_for_queue_entries_for_write_notify) defined elsewhere.
 */
int main (int argc, char *argv[])
{
  int nProc, iProc;
  int provided, required = MPI_THREAD_MULTIPLE;

  MPI_Init_thread(&argc, &argv, required, &provided);

  /* BUGFIX: check the thread level actually granted by the MPI library.
     The original asserted `required == MPI_THREAD_MULTIPLE`, which compares
     two constants and is always true. */
  ASSERT(provided == MPI_THREAD_MULTIPLE);

  MPI_Comm_rank (MPI_COMM_WORLD, &iProc);
  MPI_Comm_size (MPI_COMM_WORLD, &nProc);

  /* GASPI rank/size must agree with the MPI ones. */
  gaspi_rank_t iProcG, nProcG;
  SUCCESS_OR_DIE (gaspi_proc_init (GASPI_BLOCK));
  SUCCESS_OR_DIE (gaspi_proc_rank (&iProcG));
  SUCCESS_OR_DIE (gaspi_proc_num (&nProcG));

  ASSERT(iProc == iProcG);
  ASSERT(nProc == nProcG);

  /* One notification id is needed per (stage, thread) pair. */
  gaspi_number_t notification_num;
  SUCCESS_OR_DIE (gaspi_notification_num (&notification_num));
  ASSERT(K_SZ*nThreads <= notification_num);

  // num threads
  omp_set_num_threads(nThreads);

  // global per-thread stage counters
  compStage = malloc(nThreads * sizeof(counter_t));
  ASSERT(compStage != NULL);

  // left, right neighbour (proc)
  const int left  = LEFT(iProc);
  const int right = RIGHT(iProc);

  // assignment per proc, i-direction
#ifdef USE_STRONG_SCALING
  int mSize = M_SZ/nProc;
  if (M_SZ % nProc != 0)
    {
      mSize++;
    }
  const int mStart = iProc*mSize + 1;
  const int mStop  = MIN((iProc+1)*mSize, M_SZ);
  mSize = mStop-mStart+1;
#else
  int mSize = M_SZ;
  const int mStart = iProc*mSize + 1;
  const int mStop  = MIN((iProc+1)*mSize, M_SZ*nProc);
  mSize = mStop-mStart+1;
#endif

  /* round the local leading dimension up to a full cache line */
  const int CL_SZ = ((mSize+1) % CL) == 0 ? (mSize+1) : CL*(1+(mSize+1)/CL);

  // allocate segment for array
  gaspi_segment_id_t const segment_id = 0;
  SUCCESS_OR_DIE ( gaspi_segment_create
                   ( segment_id
                     , CL_SZ * (nThreads+1) * (K_SZ+1) * sizeof (double)
                     , GASPI_GROUP_ALL
                     , GASPI_BLOCK
                     , GASPI_MEM_UNINITIALIZED
                     ));

  gaspi_pointer_t array;
  SUCCESS_OR_DIE ( gaspi_segment_ptr ( segment_id, &array) );
  ASSERT (array != 0);

  /* thread-local initialization, then rank-global initialization */
#pragma omp parallel default (none) shared(compStage, CL_SZ, \
	    mSize, array, stdout, stderr)
  {
    int const tid = omp_get_thread_num();
    compStage[tid].global = 0;

    // initialize data
    data_init_tlocal(mSize, tid, array, CL_SZ);
  }
  data_init_global(mStart, mSize, iProc, array, CL_SZ);

  int iter;
  double median[NITER];

  for (iter = 0; iter < NITER; iter++)
    {
      double time = -now();
      MPI_Barrier(MPI_COMM_WORLD);

#pragma omp parallel default (none) shared(mStart, mSize, \
	    compStage, nThreads, iProc, nProc, stdout, stderr, array, CL_SZ)
      {
        int const tid = omp_get_thread_num();
        gaspi_queue_id_t queue_id = 0;

        int k;
        for (k = 1; k <= K_SZ; k++)
          {
            /* wait for the stage-k halo value from the left neighbour */
            if (left >= 0 )
              {
                gaspi_notification_id_t id, data_available = (k-1)*nThreads+tid;
                SUCCESS_OR_DIE(gaspi_notify_waitsome (segment_id
                                                      , data_available
                                                      , 1
                                                      , &id
                                                      , GASPI_BLOCK
                                                      ));
                ASSERT (id == data_available);

                gaspi_notification_t value;
                SUCCESS_OR_DIE (gaspi_notify_reset (segment_id
                                                    , id
                                                    , &value
                                                    ));
                ASSERT (value == 1);
              }

            /* spin until the previous thread has completed this stage */
            if(tid > 0)
              {
                volatile int it;
                while((it = compStage[tid-1].global) <= compStage[tid].global)
                  {
                    _mm_pause();
                  }
              }

            // compute
            data_compute (mStart, mSize, tid, k, array, CL_SZ);

            /* increase stage counter */
            compStage[tid].global++;

            // issue send
            if (right < nProc)
              {
                gaspi_notification_id_t data_available = (k-1)*nThreads+tid;
                wait_for_queue_entries_for_write_notify(&queue_id);
                SUCCESS_OR_DIE ( gaspi_write_notify
                                 ( segment_id
                                   , array_OFFSET (mSize, tid+1, k)
                                   , right
                                   , segment_id
                                   , array_OFFSET (0, tid+1, k)
                                   , sizeof (double)
                                   , data_available
                                   , 1
                                   , queue_id
                                   , GASPI_BLOCK
                                   ));
              }
#ifdef USE_OMP_BARRIER
#pragma omp barrier
#endif
          }
      }
      MPI_Barrier(MPI_COMM_WORLD);
      time += now();

      /* iteration time */
      median[iter] = time;
    }

  MPI_Barrier(MPI_COMM_WORLD);

  // validate
#pragma omp parallel default (none) shared(mStart, array, CL_SZ, mSize)
  {
    int const tid = omp_get_thread_num();
    data_validate (mStart, mSize, tid, K_SZ, array, CL_SZ);
  }
  MPI_Barrier(MPI_COMM_WORLD);

  sort_median(&median[0], &median[NITER-1]);

  /* NOTE(review): every rank prints its own timing line; restrict to a
     single rank if one summary line per run is desired. */
  printf ("# gaspi %s nProc: %d nThreads: %d M_SZ: %d K_SZ: %d niter: %d time: %g\n"
          , argv[0], nProc, nThreads, M_SZ, K_SZ, NITER, median[NITER/2]
          );

  if (iProc == nProc-1)
    {
      double res = 1.0E-06 * 4 * mSize*nThreads*K_SZ*nProc / median[NITER/2];
      printf("\nRate (MFlops/s): %lf\n",res);
    }

  MPI_Barrier(MPI_COMM_WORLD);

  /* release the stage counters (cast away volatile for free) */
  free((void *)compStage);

  MPI_Finalize();

  return EXIT_SUCCESS;
}
|
CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class PrePostActionTy {
public:
  explicit PrePostActionTy() = default;
  /// Hook invoked before the region body is emitted; default does nothing.
  virtual void Enter(CodeGenFunction &CGF) {}
  /// Hook invoked after the region body is emitted; default does nothing.
  virtual void Exit(CodeGenFunction &CGF) {}
  virtual ~PrePostActionTy() = default;
};
/// Class provides a way to call simple version of codegen for OpenMP region, or
/// an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  /// Type-erased address of the wrapped callable (restored in CallbackFn).
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  /// Trampoline that knows the callable's static type.
  CodeGenTy Callback;
  /// Optional action for the next invocation; settable on a const object.
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  /// Casts \p CodeGen back to \p Callable and invokes it.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  /// Wraps any callable taking (CodeGenFunction &, PrePostActionTy &).
  /// Only the callable's address is stored, so it must outlive this object.
  /// The enable_if excludes RegionCodeGenTy itself so this template does not
  /// shadow the copy constructor.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  /// Installs the pre/post action used by operator().
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  /// Invokes the stored callable (defined out of line).
  void operator()(CodeGenFunction &CGF) const;
};
/// Clause data collected for emitting an OpenMP task-based construct.
struct OMPTaskDataTy final {
  // 'private' clause variables and their private copies.
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  // 'firstprivate' clause variables, copies, and initializer expressions.
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  // 'lastprivate' clause variables and their private copies.
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  // 'reduction' clause variables, originals, copies, and combiner ops.
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  // Declarations local to the task.
  SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
  /// Data for one 'depend' clause: kind, optional iterator, and the
  /// dependency expressions.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  // Clause value paired with a presence/flag bit (final/schedule/priority).
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  // Reduction descriptor value, if any.
  llvm::Value *Reductions = nullptr;
  // NOTE(review): presumably the number of outlined parts — confirm usage.
  unsigned NumberOfParts = 0;
  // Task is tied unless an 'untied'-style clause cleared this.
  bool Tied = true;
  bool Nogroup = false;
  bool IsReductionWithTaskMod = false;
  bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation reduction operation.
    const Expr *ReductionOp = nullptr;
    /// Bundles the four expressions of one reduction clause item.
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;
  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;

  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  /// Builds one ReductionData entry per clause item from the parallel lists.
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  /// \param N Number of the reduction item.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivatedAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns LValue for the original reduction item.
  LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns the base declaration of the reduction item.
  const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  /// \param N Number of the reduction item.
  bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
/// Allows to disable automatic handling of functions used in target regions
/// as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
CodeGenModule &CGM;
bool SavedShouldMarkAsGlobal;
public:
DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
~NontemporalDeclsRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class UntiedTaskLocalDeclsRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
UntiedTaskLocalDeclsRAII(
CodeGenFunction &CGF,
const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
std::pair<Address, Address>> &LocalVars);
~UntiedTaskLocalDeclsRAII();
};
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
/// store private copy in shared address.
/// Also, stores the expression for the private loop counter and its
/// threadprivate name.
struct LastprivateConditionalData {
llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
DeclToUniqueName;
LValue IVLVal;
llvm::Function *Fn = nullptr;
bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
enum class ActionToDo {
DoNotPush,
PushAsLastprivateConditional,
DisableLastprivateConditional,
};
CodeGenModule &CGM;
ActionToDo Action = ActionToDo::DoNotPush;
/// Check and try to disable analysis of inner regions for changes in
/// lastprivate conditional.
void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
llvm::DenseSet<CanonicalDeclPtr<const Decl>>
&NeedToAddForLPCsAsDisabled) const;
LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
public:
explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S,
LValue IVLVal);
static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
~LastprivateConditionalRAII();
};
llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
/// Returns default flags for the barriers depending on the directive, for
/// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
/// Insert point for the service instructions.
llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
llvm::DenseMap<CanonicalDeclPtr<const Decl>,
std::tuple<QualType, const FieldDecl *,
const FieldDecl *, LValue>>>
LastprivateConditionalToTypes;
/// Maps function to the position of the untied task locals stack.
llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// Type typedef struct kmp_task_affinity_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool flag1 : 1;
/// bool flag2 : 1;
/// kmp_int32 reserved : 30;
/// } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
/// size_t size; // Size of the entry info (0 if it a function).
/// int32_t flags;
/// int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
CodeGenModule &CGM;
/// Number of entries registered so far.
unsigned OffloadingEntriesNum = 0;
public:
/// Base class of the entries info.
class OffloadEntryInfo {
public:
/// Kind of a given entry.
enum OffloadingEntryInfoKinds : unsigned {
/// Entry is a target region.
OffloadingEntryInfoTargetRegion = 0,
/// Entry is a declare target variable.
OffloadingEntryInfoDeviceGlobalVar = 1,
/// Invalid entry info.
OffloadingEntryInfoInvalid = ~0u
};
protected:
OffloadEntryInfo() = delete;
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
uint32_t Flags)
: Flags(Flags), Order(Order), Kind(Kind) {}
~OffloadEntryInfo() = default;
public:
bool isValid() const { return Order != ~0u; }
unsigned getOrder() const { return Order; }
OffloadingEntryInfoKinds getKind() const { return Kind; }
uint32_t getFlags() const { return Flags; }
void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
llvm::Constant *getAddress() const {
return cast_or_null<llvm::Constant>(Addr);
}
void setAddress(llvm::Constant *V) {
assert(!Addr.pointsToAliveValue() && "Address has been set before!");
Addr = V;
}
static bool classof(const OffloadEntryInfo *Info) { return true; }
private:
/// Address of the entity that has to be mapped for offloading.
llvm::WeakTrackingVH Addr;
/// Flags associated with the device global.
uint32_t Flags = 0u;
/// Order this entry was emitted.
unsigned Order = ~0u;
OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
};
/// Return true if there are no entries defined.
bool empty() const;
/// Return number of entries defined so far.
unsigned size() const { return OffloadingEntriesNum; }
OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
//
// Target region entries related.
//
/// Kind of the target registry entry.
enum OMPTargetRegionEntryKind : uint32_t {
/// Mark the entry as target region.
OMPTargetRegionEntryTargetRegion = 0x0,
/// Mark the entry as a global constructor.
OMPTargetRegionEntryCtor = 0x02,
/// Mark the entry as a global destructor.
OMPTargetRegionEntryDtor = 0x04,
};
/// Target region entries info.
class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
/// Address that can be used as the ID of the entry.
llvm::Constant *ID = nullptr;
public:
OffloadEntryInfoTargetRegion()
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
explicit OffloadEntryInfoTargetRegion(unsigned Order,
llvm::Constant *Addr,
llvm::Constant *ID,
OMPTargetRegionEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
ID(ID) {
setAddress(Addr);
}
llvm::Constant *getID() const { return ID; }
void setID(llvm::Constant *V) {
assert(!ID && "ID has been set before!");
ID = V;
}
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoTargetRegion;
}
};
/// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
unsigned Order);
/// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
OMPTargetRegionEntryKind Flags);
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum) const;
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
const OffloadEntryInfoTargetRegion &)>
OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action);
//
// Device global variable entries related.
//
/// Kind of the global variable entry.
enum OMPTargetGlobalVarEntryKind : uint32_t {
  /// Mark the entry as a "declare target to" variable.
  OMPTargetGlobalVarEntryTo = 0x0,
  /// Mark the entry as a "declare target link" variable.
  OMPTargetGlobalVarEntryLink = 0x1,
};
/// Device global variable entries info.
class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
/// Type of the global variable.
CharUnits VarSize;
llvm::GlobalValue::LinkageTypes Linkage;
public:
OffloadEntryInfoDeviceGlobalVar()
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
OMPTargetGlobalVarEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
explicit OffloadEntryInfoDeviceGlobalVar(
unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
VarSize(VarSize), Linkage(Linkage) {
setAddress(Addr);
}
CharUnits getVarSize() const { return VarSize; }
void setVarSize(CharUnits Size) { VarSize = Size; }
llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
}
};
/// Initialize device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name,
OMPTargetGlobalVarEntryKind Flags,
unsigned Order);
/// Register device global variable entry.
void
registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Checks if the variable with the given name has been registered already.
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
  return OffloadEntriesDeviceGlobalVar.find(VarName) !=
         OffloadEntriesDeviceGlobalVar.end();
}
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(StringRef,
const OffloadEntryInfoDeviceGlobalVar &)>
OffloadDeviceGlobalVarEntryInfoActTy;
void actOnDeviceGlobalVarEntriesInfo(
const OffloadDeviceGlobalVarEntryInfoActTy &Action);
private:
// Storage for target region entries kind. The storage is to be indexed by
// file ID, device ID, parent function name and line number.
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
OffloadEntriesTargetRegionPerLine;
typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
OffloadEntriesTargetRegionPerParentName;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
OffloadEntriesTargetRegionPerFile;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
OffloadEntriesTargetRegionPerDevice;
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
/// Storage for device global variable entries kind. The storage is to be
/// indexed by mangled name.
typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
OffloadEntriesDeviceGlobalVarTy;
OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
using UntiedLocalVarsAddressesMap =
llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
std::pair<Address, Address>>;
llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the device (offloading) compilation.
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If the variable already exists,
/// the type must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
/// Aggregated results of lowering a task directive (see emitTaskInit below).
struct TaskResultTy {
  /// The allocated task object — presumably the result of the
  /// __kmpc_omp_task_alloc call described above; confirm in emitTaskInit.
  llvm::Value *NewTask = nullptr;
  /// The generated .omp_task_entry. function.
  llvm::Function *TaskEntry = nullptr;
  /// The new task value cast to the task type — TODO confirm exact meaning
  /// against emitTaskInit's implementation.
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  /// LValue for the base of the kmp_task_t record.
  LValue TDBase;
  /// Record declaration for the kmp_task_t type.
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  /// Task duplication function (used for taskloops), if any.
  llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default. Target-specific runtimes may override this to place constant
/// firstprivates in a different address space.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
SourceLocation Loc);
/// Returns the number of the elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
LValue DepobjLVal,
SourceLocation Loc);
public:
/// Construct the runtime using "." for both name-separator components
/// (see getName); delegates to the protected separator-taking constructor.
explicit CGOpenMPRuntime(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at the compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Get the function for the specified user-defined mapper. If it does not
/// exist, create one.
llvm::Function *
getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// Struct with the values to be passed to the dispatch runtime function.
struct DispatchRTInput {
  /// Loop lower bound.
  llvm::Value *LB = nullptr;
  /// Loop upper bound.
  llvm::Value *UB = nullptr;
  /// Chunk size specified using 'schedule' clause (nullptr if chunk
  /// was not specified).
  llvm::Value *Chunk = nullptr;
  DispatchRTInput() = default;
  /// Convenience constructor setting all three values at once.
  DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
      : LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function.
/// Unlike DispatchRTInput there is no default constructor: all addresses
/// must be provided up front.
struct StaticRTInput {
  /// Size of the iteration variable in bits.
  unsigned IVSize = 0;
  /// Sign of the iteration variable.
  bool IVSigned = false;
  /// true if loop is ordered, false otherwise.
  bool Ordered = false;
  /// Address of the output variable in which the flag of the last iteration
  /// is returned.
  Address IL = Address::invalid();
  /// Address of the output variable in which the lower iteration number is
  /// returned.
  Address LB = Address::invalid();
  /// Address of the output variable in which the upper iteration number is
  /// returned.
  Address UB = Address::invalid();
  /// Address of the output variable in which the stride value is returned
  /// necessary to generate the static_chunked scheduled loop.
  Address ST = Address::invalid();
  /// Value of the chunk for the static_chunked scheduled loop. For the
  /// default (nullptr) value, the chunk 1 will be used.
  llvm::Value *Chunk = nullptr;
  StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
                Address LB, Address UB, Address ST,
                llvm::Value *Chunk = nullptr)
      : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
        UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit a code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
  /// Options controlling how a reduction clause is code-generated
  /// (consumed by emitReduction).
  struct ReductionOptionsTy {
    /// True if the parent directive also has a 'nowait' clause.
    bool WithNowait;
    /// True if only the reduction operation itself should be emitted
    /// (used for 'omp simd' directives on the host).
    bool SimpleReduction;
    /// Kind of the directive the reduction is emitted for.
    OpenMPDirectiveKind ReductionKind;
  };
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
  /// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
  /// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
  /// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
  /// target directive (or null if no device clause is used), plus the device
  /// clause modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a GD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
const VarDecl *VD);
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directives was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
/// Set to true if device pointer information have to be obtained.
bool RequiresDevicePointerInfo = false;
/// Set to true if Clang emits separate runtime calls for the beginning and
/// end of the region. These calls might have separate map type arrays.
bool SeparateBeginEndCalls = false;
public:
/// The array of base pointer passed to the runtime library.
llvm::Value *BasePointersArray = nullptr;
/// The array of section pointers passed to the runtime library.
llvm::Value *PointersArray = nullptr;
/// The array of sizes passed to the runtime library.
llvm::Value *SizesArray = nullptr;
/// The array of map types passed to the runtime library for the beginning
/// of the region or for the entire region if there are no separate map
/// types for the region end.
llvm::Value *MapTypesArray = nullptr;
/// The array of map types passed to the runtime library for the end of the
/// region, or nullptr if there are no separate map types for the region
/// end.
llvm::Value *MapTypesArrayEnd = nullptr;
/// The array of user-defined mappers passed to the runtime library.
llvm::Value *MappersArray = nullptr;
/// Indicate whether any user-defined mapper exists.
bool HasMapper = false;
/// The total number of pointers passed to the runtime library.
unsigned NumberOfPtrs = 0u;
/// Map between the a declaration of a capture and the corresponding base
/// pointer address where the runtime returns the device pointers.
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
explicit TargetDataInfo() {}
explicit TargetDataInfo(bool RequiresDevicePointerInfo,
bool SeparateBeginEndCalls)
: RequiresDevicePointerInfo(RequiresDevicePointerInfo),
SeparateBeginEndCalls(SeparateBeginEndCalls) {}
/// Clear information about the data arrays.
void clearArrayInfo() {
BasePointersArray = nullptr;
PointersArray = nullptr;
SizesArray = nullptr;
MapTypesArray = nullptr;
MapTypesArrayEnd = nullptr;
MappersArray = nullptr;
HasMapper = false;
NumberOfPtrs = 0u;
}
/// Return true if the current target data information has valid arrays.
bool isValid() {
return BasePointersArray && PointersArray && SizesArray &&
MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
}
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
  /// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
  /// Translates the native parameter of outlined function if this is required
  /// for target.
  /// The base-class default performs no translation and returns \p NativeParam
  /// unchanged; target-specific runtimes may override this.
  /// \param FD Field decl from captured record for the parameter.
  /// \param NativeParam Parameter itself.
  virtual const VarDecl *translateParameter(const FieldDecl *FD,
                                            const VarDecl *NativeParam) const {
    return NativeParam;
  }
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
  /// Choose default schedule type and chunk value for the
  /// dist_schedule clause.
  /// The base-class default is a no-op: \p ScheduleKind and \p Chunk are left
  /// untouched; device-specific runtimes may override to pick other defaults.
  virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
      const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
      llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
virtual void processRequiresDirective(const OMPRequiresDecl *D);
/// Gets default memory ordering as specified in requires directive.
llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Checks if the lastprivate conditional was updated in inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;bool Fired = false;
/// #pragma omp ... shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
  /// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
ArrayRef<OMPTaskDataTy::DependData> Dependencies,
SourceLocation Loc);
  /// Emits list of dependencies based on the provided data (array of
  /// dependence/expression pairs) for depobj construct. In this case, the
  /// variable is allocated dynamically. \returns Pointer to the first
  /// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
const OMPTaskDataTy::DependData &Dependencies,
SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
const Expr *AllocatorTraits);
/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
/// Returns true if the variable is a local variable in untied task.
bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};
/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
Utilities.h | #pragma once
#include <functional>
#include <iostream>
#include "Defs.h"
/// Ceiling of the integer quotient x / y, e.g. ceiling_div(7, 2) == 4.
/// Intended for non-negative x and positive y.
inline int ceiling_div(const int x, const int y)
{
    const int biased = x + (y - 1);
    return biased / y;
}
/// Invoke f(x, y, value) for every element of a w-by-h row-major buffer,
/// visiting elements in row-major order (y outer, x inner).
template <typename T>
inline void foreach(const T* src, int w, int h, std::function<void(int, int, T)> f) {
    const T* row = src;
    for (int row_idx = 0; row_idx < h; ++row_idx, row += w) {
        for (int col = 0; col < w; ++col) {
            f(col, row_idx, row[col]);
        }
    }
}
/// Print every element of a w-by-h row-major buffer to stdout as
/// "x, y: value", one element per line, in row-major order.
/// Fix: the callback previously took `float` regardless of T, silently
/// narrowing wider element types (double, large integers) before printing.
template <typename T>
inline void printBuffer(const T* src, int w, int h) {
    std::function<void(int, int, T)> print = [](int x, int y, T val) {
        std::cout << x << ", " << y << ": " << val << std::endl;
    };
    foreach<T>(src, w, h, print);
}
/// Set every element of a w-by-h row-major buffer to `value`.
template <typename T>
void fill(T* dest, int w, int h, const T value) {
    for (int row = 0; row < h; ++row) {
        T* const line = dest + row * w;
        for (int col = 0; col < w; ++col)
            line[col] = value;
    }
}
/// Write f(x, y) into dest[y*w + x] for every position of a w-by-h
/// row-major buffer. `src` is unused; kept for signature compatibility.
template <typename T>
inline void map_index(T* src, T* dest, int w, int h, std::function<T(int, int)> f) {
    int cursor = 0;
    for (int row = 0; row < h; ++row) {
        for (int col = 0; col < w; ++col)
            dest[cursor++] = f(col, row);
    }
}
/// Fill the buffer in place with the example pattern (x+1)*(y+1).
/// Fix: compute in T (matching par_fill_example below) instead of doing an
/// intermediate `int` multiply, which could overflow for large dimensions
/// even when T is a wide type. For in-range values the result is unchanged.
template <typename T>
inline void fill_example(T* src, int w, int h) {
    auto create = [](int x, int y) { return (T(x) + 1) * (T(y) + 1); };
    map_index<T>(src, src, w, h, create);
}
/// Parallel variant of map_index: write f(x, y) into dest[y*w + x] for
/// every position, distributing rows across OpenMP threads.
/// `src` is unused; kept for signature compatibility.
template <typename T>
inline void par_map_index(T* src, T* dest, int w, int h, std::function<T(int, int)> f) {
#pragma omp parallel for
    for (int row = 0; row < h; ++row) {
        T* const line = dest + row * w;
        for (int col = 0; col < w; ++col)
            line[col] = f(col, row);
    }
}
/// Fill the buffer in place, in parallel, with the example pattern
/// (x+1)*(y+1) computed in T.
template <typename T>
inline void par_fill_example(T* src, int w, int h) {
    auto pattern = [](int col, int row) { return (T(col) + 1) * (T(row) + 1); };
    par_map_index<T>(src, src, w, h, pattern);
}
|
ctegen2.c | #include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
# ifdef _OPENMP
#include <omp.h>
# endif
#include "hstcal_memory.h"
#include "ctegen2.h"
#include "hstcalerr.h"
#include "hstcal.h"
#include "trlbuf.h"
/* Atomically raise *atom to True; a NULL pointer is a no-op.
 * The named OpenMP critical section serializes concurrent writers;
 * without OpenMP it degenerates to a plain store. */
static void setAtomicFlag(Bool * atom)
{
    if (atom == NULL)
        return;
#ifdef _OPENMP
#pragma omp critical(critSecAtomicFlag)
#endif
    {
        *atom = True;
    }
}
static void setAtomicInt(int * atom, const int value)
{
if (!atom)
return;
#ifdef _OPENMP
#pragma omp critical(critSecAtomicInt)
#endif
{
*atom = value;
}
}
/* Forward CTE model: push each column of `input` through the simulated
 * charge-transfer readout (simulateColumnReadout) and write the smeared
 * result to `output`.
 *
 * input        - source image; must be stored column-major (asserted)
 * output       - destination image; storage order is forced to column-major
 * trapPixelMap - per-pixel charge-trap scaling, column-major (asserted)
 * ctePars      - CTE parameters (trap profiles rprof/cprof, n_par shifts, ...)
 *
 * Returns HSTCAL_OK on success; on failure sets the global `status`
 * (OUT_OF_MEMORY or the readout's error code) and returns it. Columns are
 * processed independently, in parallel when built with OpenMP.
 *
 * Fix: the two failure messages previously named "inverseCTEBlur()" -- a
 * copy-paste error -- and now correctly name forwardModel().
 */
int forwardModel(const SingleGroup * input, SingleGroup * output, SingleGroup * trapPixelMap, CTEParamsFast * ctePars)
{
    extern int status;
    if (!input || !output || !trapPixelMap || !ctePars)
        return (status = ALLOCATION_PROBLEM);

    //WARNING - assumes column major storage order
    assert(trapPixelMap->sci.data.storageOrder == COLUMNMAJOR);
    assert(input->sci.data.storageOrder == COLUMNMAJOR);
    output->sci.data.storageOrder = COLUMNMAJOR;

    const unsigned nRows = output->sci.data.ny;
    const unsigned nColumns = output->sci.data.nx;
    const FloatTwoDArray * cteRprof = &ctePars->rprof->data;
    const FloatTwoDArray * cteCprof = &ctePars->cprof->data;

    Bool allocationFail = False;
    Bool runtimeFail = False;

#ifdef _OPENMP
#pragma omp parallel shared(input, output, ctePars, cteRprof, cteCprof, trapPixelMap, allocationFail, runtimeFail, status)
#endif
    {
        int localStatus = HSTCAL_OK; //Note: used to set extern int status atomically, note global status takes last set value
        //Thread local pointer register
        PtrRegister localPtrReg;
        initPtrRegister(&localPtrReg);
        double * model = malloc(sizeof(*model)*nRows);
        addPtr(&localPtrReg, model, &free);
        if (!model)
            setAtomicFlag(&allocationFail);
        float * traps = NULL;

        //Allocate all local memory before anyone proceeds
#ifdef _OPENMP
#pragma omp barrier
#endif
        if (!allocationFail)
        {
            {unsigned j;
#ifdef _OPENMP
#pragma omp for schedule(dynamic)
#endif
            for (j = 0; j < nColumns; ++j)
            {
                // Can't use memcpy as diff types
                // Do in place (in a distributed context)
                {unsigned i;
                for (i = 0; i < nRows; ++i)
                    model[i] = PixColumnMajor(input->sci.data,i,j);
                }
                traps = &(PixColumnMajor(trapPixelMap->sci.data, 0, j));
                if ((localStatus = simulateColumnReadout(model, traps, ctePars, cteRprof, cteCprof, nRows, ctePars->n_par)))
                {
                    setAtomicFlag(&runtimeFail);
                    setAtomicInt(&status, localStatus);
                }
                // Update source array
                // Can't use memcpy as arrays of diff types
                {unsigned i;
                for (i = 0; i < nRows; ++i)
                    PixColumnMajor(output->sci.data, i, j) = model[i];
                }
            }} //end loop over columns
        }
        freeOnExit(&localPtrReg);
    }// close scope for #pragma omp parallel

    if (allocationFail)
    {
        sprintf(MsgText, "Out of memory in forwardModel()");
        trlerror(MsgText);
        return (status = OUT_OF_MEMORY);
    }
    if (runtimeFail)
    {
        sprintf(MsgText, "Runtime fail in forwardModel()");
        trlerror(MsgText);
        return status;
    }
    return (status);
}
/* Inverse CTE model ("unblur"): for each image column, iteratively search
 * for the model column which, when pushed through the forward readout
 * simulation (simulateColumnReadout), reproduces the observed column.
 * The converged, CTE-corrected column is written to `output`.
 *
 * input        - observed image; must be stored column-major (asserted)
 * output       - destination image; storage order is forced to column-major
 * trapPixelMap - per-pixel charge-trap scaling, column-major (asserted)
 * ctePars      - CTE parameters used here: n_forward (iteration count),
 *                rn_amp (readnoise amplitude), fix_rocr + thresh (readout
 *                cosmic-ray over-subtraction correction), n_par, rprof, cprof
 *
 * Returns HSTCAL_OK on success; on failure sets the global `status`
 * (OUT_OF_MEMORY or the readout's error code) and returns it. Columns are
 * processed independently, in parallel when built with OpenMP.
 */
int inverseCTEBlur(const SingleGroup * input, SingleGroup * output, SingleGroup * trapPixelMap, CTEParamsFast * ctePars)
{
    extern int status;
    if (!input || !output || !trapPixelMap || !ctePars)
        return (status = ALLOCATION_PROBLEM);

    //WARNING - assumes column major storage order
    assert(trapPixelMap->sci.data.storageOrder == COLUMNMAJOR);
    assert(input->sci.data.storageOrder == COLUMNMAJOR);
    output->sci.data.storageOrder = COLUMNMAJOR;

    const unsigned nRows = output->sci.data.ny;
    const unsigned nColumns = output->sci.data.nx;
    // Squared readnoise amplitude; denominator of the damping factor applied
    // to per-pixel adjustments below.
    const double rnAmp2 = ctePars->rn_amp * ctePars->rn_amp;
    const FloatTwoDArray * cteRprof = &ctePars->rprof->data;
    const FloatTwoDArray * cteCprof = &ctePars->cprof->data;

    Bool allocationFail = False;
    Bool runtimeFail = False;

#ifdef _OPENMP
#pragma omp parallel shared(input, output, ctePars, cteRprof, cteCprof, trapPixelMap, allocationFail, runtimeFail, status)
#endif
    {
        int localStatus = HSTCAL_OK; //Note: used to set extern int status atomically, note global status takes last set value
        //Thread local pointer register
        PtrRegister localPtrReg;
        initPtrRegister(&localPtrReg);
        // Three per-thread scratch columns: the evolving model, a copy of the
        // model before the forward simulation, and the observed data.
        double * model = malloc(sizeof(*model)*nRows);
        addPtr(&localPtrReg, model, &free);
        if (!model)
            setAtomicFlag(&allocationFail);
        double * tempModel = NULL;
        if (!allocationFail)
            tempModel = malloc(sizeof(*tempModel)*nRows);
        // NOTE(review): registered with addPtr even when the malloc was
        // skipped (NULL) -- assumed safe for freeOnExit; confirm.
        addPtr(&localPtrReg, tempModel, &free);
        if (!tempModel)
            setAtomicFlag(&allocationFail);
        double * observed = NULL;
        if (!allocationFail)
            observed = malloc(sizeof(*observed)*nRows);
        addPtr(&localPtrReg, observed, &free);
        if (!observed)
            setAtomicFlag(&allocationFail);
        float * traps = NULL;

        //Allocate all local memory before anyone proceeds
#ifdef _OPENMP
#pragma omp barrier
#endif
        if (!allocationFail)
        {
            Bool localOK = True;
            {unsigned j;
#ifdef _OPENMP
#pragma omp for schedule(dynamic)
#endif
            for (j = 0; j < nColumns; ++j)
            {
                // Can't use memcpy as diff types
                {unsigned i;
                for (i = 0; i < nRows; ++i)
                    observed[i] = PixColumnMajor(input->sci.data,i,j);
                }
                traps = &(PixColumnMajor(trapPixelMap->sci.data, 0, j));
                unsigned NREDO = 0;
                Bool REDO;
                do
                {
                    REDO = False; /*START OUT NOT NEEDING TO MITIGATE CRS*/
                    /*STARTING WITH THE OBSERVED IMAGE AS MODEL, ADOPT THE SCALING FOR THIS COLUMN*/
                    memcpy(model, observed, nRows*sizeof(*observed));

                    /*START WITH THE INPUT ARRAY BEING THE LAST OUTPUT
                    IF WE'VE CR-RESCALED, THEN IMPLEMENT CTEF*/
                    // Damped fixed-point iterations: simulate the readout of the
                    // current model, then subtract the (damped) residual vs the
                    // observed column.
                    {unsigned NITINV;
                    for (NITINV = 1; NITINV <= ctePars->n_forward - 1; ++NITINV)
                    {
                        memcpy(tempModel, model, nRows*sizeof(*model));
                        if ((localStatus = simulateColumnReadout(model, traps, ctePars, cteRprof, cteCprof, nRows, ctePars->n_par)))
                        {
                            setAtomicFlag(&runtimeFail);
                            setAtomicInt(&status, localStatus);
                            localOK = False;
                            break;
                        }

                        //Now that the updated readout has been simulated, subtract this from the model
                        //to reproduce the actual image, without the CTE trails.
                        //Whilst doing so, DAMPEN THE ADJUSTMENT IF IT IS CLOSE TO THE READNOISE, THIS IS
                        //AN ADDITIONAL AID IN MITIGATING THE IMPACT OF READNOISE
                        {unsigned i;
                        for (i = 0; i < nRows; ++i)
                        {
                            double delta = model[i] - observed[i];
                            double delta2 = delta * delta;

                            //DAMPEN THE ADJUSTMENT IF IT IS CLOSE TO THE READNOISE
                            // Damping factor delta^2/(delta^2 + rn^2) -> 0 for
                            // |delta| << rn_amp, -> 1 for |delta| >> rn_amp.
                            delta *= delta2 / (delta2 + rnAmp2);

                            //Now subtract the simulated readout
                            model[i] = tempModel[i] - delta;
                        }}
                    }}
                    if (!localOK)
                        break;

                    //Do the last forward iteration but don't dampen... no idea why???
                    memcpy(tempModel, model, sizeof(*model)*nRows);
                    if ((localStatus = simulateColumnReadout(model, traps, ctePars, cteRprof, cteCprof, nRows, ctePars->n_par)))
                    {
                        setAtomicFlag(&runtimeFail);
                        setAtomicInt(&status, localStatus);
                        localOK = False;
                        break;
                    }
                    //Now subtract the simulated readout
                    {unsigned i;
                    for (i = 0; i < nRows; ++i)
                        model[i] = tempModel[i] - (model[i] - observed[i]);
                    }

                    // Optionally detect over-subtracted readout-CR trails; if any
                    // were found (and downscaled in `traps`), redo this column.
                    REDO = ctePars->fix_rocr ? correctCROverSubtraction(traps, model, observed, nRows,
                            ctePars->thresh) : False;

                } while (localOK && REDO && ++NREDO < 5); //If really wanting 5 re-runs then use NREDO++

                // Update source array
                // Can't use memcpy as arrays of diff types
                {unsigned i;
                for (i = 0; i < nRows; ++i)
                    PixColumnMajor(output->sci.data, i, j) = model[i];
                }
            }} //end loop over columns
        }
        freeOnExit(&localPtrReg);
    }// close scope for #pragma omp parallel

    if (allocationFail)
    {
        sprintf(MsgText, "Out of memory in inverseCTEBlur()");
        trlerror(MsgText);
        return (status = OUT_OF_MEMORY);
    }
    if (runtimeFail)
    {
        sprintf(MsgText, "Runtime fail in inverseCTEBlur()");
        trlerror(MsgText);
        return status;
    }
    return (status);
}
/* Simulate one readout pass of a single image column (original algorithm,
 * v1.1), updating `pixelColumn` in place with the charge captured and
 * released by each charge-trap species.
 *
 * pixelColumn - in/out column pixel values
 * traps       - per-pixel trap scaling for this column (from trapPixelMap)
 * ctePars     - uses qlevq_data (per-trap charge thresholds), dpdew_data
 *               (charge captured per trap), cte_len (trail length),
 *               cte_traps (trap count), n_par (shift count divisor)
 * rprof/cprof - trail profiles indexed [trap w, transfers-1]; rprof is
 *               applied while walking a trail, cprof when a trap refills
 *               mid-trail
 * nRows       - number of pixels in the column
 *
 * Always returns HSTCAL_OK.
 */
int simulatePixelReadout_v1_1(double * const pixelColumn, const float * const traps, const CTEParamsFast * const ctePars,
        const FloatTwoDArray * const rprof, const FloatTwoDArray * const cprof, const unsigned nRows)
{
    //For performance this does not NULL check passed in ptrs

    double chargeToAdd;
    double extraChargeToAdd;
    double chargeToRemove;
    double pixel;
    double releasedFlux;
    double trappedFlux;
    int nTransfersFromTrap;

    /*FIGURE OUT WHICH TRAPS WE DON'T NEED TO WORRY ABOUT IN THIS COLUMN
    PMAX SHOULD ALWAYS BE POSITIVE HERE*/
    //Look into whether this really has to be computed each iteration?
    //Since this is simulating the readout and thus moving pixels down and out, pmax can only get smaller with
    //each pixel transfer, never greater.
    // Floor of 10 on the column maximum before mapping it to a trap index.
    double maxPixel = 10;
    {unsigned i;
    for (i = 0; i < nRows; ++i)
        maxPixel = pixelColumn[i] > maxPixel ? pixelColumn[i] : maxPixel;
    }

    //Find highest charge trap to not exceed i.e. map pmax to an index
    unsigned maxChargeTrapIndex = ctePars->cte_traps-1;
    {int w;
    for (w = maxChargeTrapIndex; w >= 0; --w)//go up or down? (if swap, change below condition)
    {
        if (ctePars->qlevq_data[w] <= maxPixel)//is any of this even needed or can we just directly map?
        {
            maxChargeTrapIndex = w;
            break;
        }
    }}

    /*GO THROUGH THE TRAPS ONE AT A TIME, FROM HIGHEST TO LOWEST Q,
    AND SEE WHEN THEY GET FILLED AND EMPTIED, ADJUST THE PIXELS ACCORDINGLY*/
    {int w;
    for (w = maxChargeTrapIndex; w >= 0; --w)
    {
        // cte_len (not < cte_len) marks "no active trail" at the column start.
        nTransfersFromTrap = ctePars->cte_len; //for referencing the image at 0
        trappedFlux = 0;
        releasedFlux = 0;

        /*GO UP THE COLUMN PIXEL BY PIXEL*/
        {unsigned i;
        for (i = 0; i < nRows; ++i)
        {
            pixel = pixelColumn[i];
            Bool isInsideTrailLength = nTransfersFromTrap < ctePars->cte_len;
            Bool isAboveChargeThreshold = pixel >= ctePars->qlevq_data[w] - 1.;

            // Skip pixels that are neither in an active trail nor able to
            // fill this trap.
            if (!isInsideTrailLength && !isAboveChargeThreshold)
                continue;

            if (pixelColumn[i] >= 0 )//seems a shame to check this every iteration
            {
                // Carry the fractional remainder forward so charge is
                // conserved while pixels stay integral.
                pixel = pixelColumn[i] + releasedFlux; /*shuffle charge in*/
                double floored = floor(pixel);
                releasedFlux = pixel - floored; /*carry the charge remainder*/
                pixel = floored; /*reset pixel*/
            }

            /*HAPPENS AFTER FIRST PASS*/
            /*SHUFFLE CHARGE IN*/
            //move out of loop to separate instance?
            if (i > 0)
            {
                // Scale the held flux down when the local trap density drops.
                if ((double)traps[i] < (double)traps[i-1])
                    trappedFlux *= ((double)traps[i] / (double)traps[i-1]);
            }

            /*RELEASE THE CHARGE*/
            chargeToAdd = 0;
            if (isInsideTrailLength)
            {
                ++nTransfersFromTrap;
                chargeToAdd = rprof->data[w*rprof->ny + nTransfersFromTrap-1] * trappedFlux;
            }

            extraChargeToAdd = 0;
            chargeToRemove = 0;
            if (pixel >= ctePars->qlevq_data[w])
            {
                // Trap (re)fills: capture charge and restart the trail counter.
                chargeToRemove =  ctePars->dpdew_data[w] / ctePars->n_par * (double)traps[i];  /*dpdew is 1 in file */
                if (nTransfersFromTrap < ctePars->cte_len)
                    extraChargeToAdd = cprof->data[w*cprof->ny + nTransfersFromTrap-1] * trappedFlux; //ttrap-1 may not be the same index as ref'd in rprof???
                nTransfersFromTrap = 0;
                trappedFlux = chargeToRemove;
            }

            pixelColumn[i] += chargeToAdd + extraChargeToAdd - chargeToRemove;
        }} //end for i
    }} //end for w
    return HSTCAL_OK;
}
/* Simulate one readout pass of a single image column (v1.2), updating
 * `pixelColumn` in place with the charge captured and released by each
 * charge-trap species. Parameters are as for simulatePixelReadout_v1_1;
 * this version drops the fractional-remainder carry (releasedFlux) and
 * restructures the capture/release branches per Jay Anderson's update.
 *
 * Always returns HSTCAL_OK.
 */
int simulatePixelReadout_v1_2(double * const pixelColumn, const float * const traps, const CTEParamsFast * const ctePars,
        const FloatTwoDArray * const rprof, const FloatTwoDArray * const cprof, const unsigned nRows)
{
    //NOTE: this version of the function, simulatePixelReadout, matches Jay Anderson's update
    //to the algorithm (https://github.com/spacetelescope/hstcal/issues/48).

    //For performance this does not NULL check passed in ptrs

    double chargeToAdd;
    double extraChargeToAdd;
    double chargeToRemove;
    double pixel;
    double trappedFlux;
    int nTransfersFromTrap;

    /*FIGURE OUT WHICH TRAPS WE DON'T NEED TO WORRY ABOUT IN THIS COLUMN
    PMAX SHOULD ALWAYS BE POSITIVE HERE*/
    //Look into whether this really has to be computed each iteration?
    //Since this is simulating the readout and thus moving pixels down and out, pmax can only get smaller with
    //each pixel transfer, never greater.
    // Floor of 10 on the column maximum before mapping it to a trap index.
    double maxPixel = 10;
    {unsigned i;
    for (i = 0; i < nRows; ++i)
        maxPixel = pixelColumn[i] > maxPixel ? pixelColumn[i] : maxPixel;
    }

    //Find highest charge trap to not exceed i.e. map pmax to an index
    unsigned maxChargeTrapIndex = ctePars->cte_traps-1;
    {int w;
    for (w = maxChargeTrapIndex; w >= 0; --w)//go up or down? (if swap, change below condition)
    {
        if (ctePars->qlevq_data[w] <= maxPixel)//is any of this even needed or can we just directly map?
        {
            maxChargeTrapIndex = w;
            break;
        }
    }}

    /*GO THROUGH THE TRAPS ONE AT A TIME, FROM HIGHEST TO LOWEST Q,
    AND SEE WHEN THEY GET FILLED AND EMPTIED, ADJUST THE PIXELS ACCORDINGLY*/
    {int w;
    for (w = maxChargeTrapIndex; w >= 0; --w)
    {
        // cte_len (not < cte_len) marks "no active trail" at the column start.
        nTransfersFromTrap = ctePars->cte_len; //for referencing the image at 0
        trappedFlux = 0;

        /*GO UP THE COLUMN PIXEL BY PIXEL*/
        {unsigned i;
        for (i = 0; i < nRows; ++i)
        {
            pixel = pixelColumn[i];
            Bool isInsideTrailLength = nTransfersFromTrap < ctePars->cte_len;

            // Reset per-pixel adjustments (check whether resetting all three
            // every iteration is actually needed).
            chargeToAdd = 0;
            extraChargeToAdd = 0;
            chargeToRemove = 0;

            /*HAPPENS AFTER FIRST PASS*/
            /*SHUFFLE CHARGE IN*/
            //move out of loop to separate instance?
            if (i > 0)
            {
                // Scale the held flux down when the local trap density drops.
                if ((double)traps[i] < (double)traps[i-1])
                    trappedFlux *= ((double)traps[i] / (double)traps[i-1]);
            }

            if (pixel >= ctePars->qlevq_data[w])
            {
                // Trap (re)fills: release from any active trail, then capture
                // fresh charge and restart the trail counter.
                if (isInsideTrailLength)
                {
                    ++nTransfersFromTrap;
                    chargeToAdd = rprof->data[w*rprof->ny + nTransfersFromTrap-1] * trappedFlux;
                    extraChargeToAdd = cprof->data[w*cprof->ny + nTransfersFromTrap-1] * trappedFlux;
                }
                trappedFlux = ctePars->dpdew_data[w] / ctePars->n_par * (double)traps[i];
                chargeToRemove = trappedFlux;
                nTransfersFromTrap = 0;
            }
            else
            {
                // Below threshold: only release along an active trail.
                if (isInsideTrailLength)
                {
                    ++nTransfersFromTrap;
                    chargeToAdd = rprof->data[w*rprof->ny + nTransfersFromTrap-1] * trappedFlux;
                }
            }

            pixelColumn[i] += chargeToAdd + extraChargeToAdd - chargeToRemove;
        }} //end for i
    }} //end for w
    return HSTCAL_OK;
}
/* Simulate the full readout of one column by applying
 * simulatePixelReadout_v1_2 once per parallel pixel shift.
 * Returns HSTCAL_OK, or the first non-zero status from a readout pass.
 * For performance this does not NULL check passed in ptrs. */
int simulateColumnReadout(double * const pixelColumn, const float * const traps, const CTEParamsFast * const cte,
        const FloatTwoDArray * const rprof, const FloatTwoDArray * const cprof, const unsigned nRows, const unsigned nPixelShifts)
{
    int ret = HSTCAL_OK;
    //Take each pixel down the detector
    {unsigned shift;
    for (shift = 1; shift <= nPixelShifts; ++shift)
    {
        ret = simulatePixelReadout_v1_2(pixelColumn, traps, cte, rprof, cprof, nRows);
        if (ret)
            return ret;
    }}
    return ret;
}
/* Detect over-subtracted readout cosmic-ray trails in a corrected column
 * and downscale the local trap values so the column can be re-run.
 *
 * traps        - in/out per-pixel trap scaling; offending pixels are *= 0.75
 * pix_model    - corrected (model) column values
 * pix_observed - original observed column values
 * nRows        - number of pixels in the column
 * threshHold   - single-pixel over-subtraction threshold (ctePars->thresh)
 *
 * Returns True if any pixel's scaling was reduced (caller should redo the
 * column), False otherwise.
 */
Bool correctCROverSubtraction(float * const traps, const double * const pix_model, const double * const pix_observed,
        const unsigned nRows, const double threshHold)
{
    /*LOOK FOR AND DOWNSCALE THE CTE MODEL IF WE FIND
    THE TELL-TALE SIGN OF READOUT CRS BEING OVERSUBTRACTED;
    IF WE FIND ANY THEN GO BACK UP AND RERUN THIS COLUMN

    THIS MODEL SEARCHES FOR OVERSUBTRACTED TRAILS.
    WHICH ARE DEFINED AS EITHER:
        - A SINGLE PIXEL VALUE BELOW -10E-
        - TWO CONSECUTIVE PIXELS TOTALING -12 E-
        - THREE TOTALLING -15 E-

    WHEN WE DETECT SUCH AN OVER-SUBTRACTED TAIL, WE ITERATIVELY REDUCE
    THE LOCAL CTE SCALING BY 25% UNTIL THE TRAIL IS
    NO LONGER NEGATIVE  THIS DOES NOT IDENTIFY ALL READOUT-CRS, BUT IT DOES
    DEAL WITH MANY OF THEM. FOR IMAGES THAT HAVE BACKGROUND GREATER THAN 10 OR SO,
    THIS WILL STILL END UP OVERSUBTRACTING CRS A BIT, SINCE WE ALLOW
    THEIR TRAILS TO BE SUBTRACTED DOWN TO -10 RATHER THAN 0.
    */

    //For performance this does not NULL check passed in ptrs

    Bool redo = False;
    // Start at i = 10 so the backward search window [i-10, i] below stays in
    // bounds; stop at nRows-2 so the 3-pixel sum can read i+1 and i+2.
    {unsigned i;
    for (i = 10; i < nRows-2; ++i)
    {
        // Over-subtraction test: 1-, 2-, or 3-pixel windows that are both
        // negative in the model and more negative than the observed data.
        if ( (( threshHold > pix_model[i] ) &&
                    ( threshHold > (pix_model[i] - pix_observed[i]))) ||

                (((pix_model[i] + pix_model[i+1]) < -12.) &&
                 (pix_model[i] + pix_model[i+1] - pix_observed[i] - pix_observed[i+1] < -12.)) ||

                (((pix_model[i] + pix_model[i+1] + pix_model[i+2]) < -15.) &&
                 ((pix_model[i] + pix_model[i+1] + pix_model[i+2] - pix_observed[i] -
                   pix_observed[i+1] - pix_observed[i+2]) < -15.))  )
        {
            redo = True;
            unsigned iMax = i;

            /*GO DOWNSTREAM AND LOOK FOR THE OFFENDING CR*/
            // i.e. within the previous 10 pixels, the one with the largest
            // (model - observed) excess.
            {unsigned ii;
            for (ii = i-10; ii <= i; ++ii)
            {
                if ( (pix_model[ii] - pix_observed[ii]) > (pix_model[iMax] - pix_observed[iMax]) )
                    iMax = ii;
            }}

            /* DOWNGRADE THE CR'S SCALING AND ALSO FOR THOSE
               BETWEEN THE OVERSUBTRACTED PIXEL AND IT*/
            {unsigned ii;
            for (ii = iMax; ii <= i; ++ii)
                traps[ii] *= 0.75;
            }
        }
    }} /*end for j*/
    return redo;
}
int populateTrapPixelMap(SingleGroup * trapPixelMap, CTEParamsFast * ctePars)
{
    /*
       Fill trapPixelMap with the per-pixel CTE scaling factor.
       For each column listed in the scale table, the scaling is linearly
       interpolated between the calibrated values at rows 512/1024/1536/2048
       and ramped along the row direction, then multiplied by the global
       scale fraction.

       int iz_data[<cte->nScaleTableColumns>];    column number in raz format
       double scale512[<cte->nScaleTableColumns>];  scaling appropriate at row 512
       double scale1024[<cte->nScaleTableColumns>]; scaling appropriate at row 1024
       double scale1536[<cte->nScaleTableColumns>]; scaling appropriate at row 1536
       double scale2048[<cte->nScaleTableColumns>]; scaling appropriate at row 2048
    */
    //For performance this does not NULL check passed in ptrs

    //WARNING - OUTPUTS column major storage order
    trapPixelMap->sci.data.storageOrder = COLUMNMAJOR;

    clock_t begin = clock();

    extern int status;

    const unsigned nRows = trapPixelMap->sci.data.ny;
    const unsigned nColumns = trapPixelMap->sci.data.nx;
    const double cteScale = ctePars->scale_frac;

#ifdef _OPENMP
    #pragma omp parallel shared(trapPixelMap, ctePars)
#endif
    {
        // Per-thread scratch (declared inside the parallel region => private).
        double trapColumnScale[4];
        double cte_i;
        double cte_j;
        double ro;
        int io;

        {unsigned i;
#ifdef _OPENMP
        #pragma omp for schedule(static)
#endif
        for (i = 0; i < ctePars->nScaleTableColumns; ++i)
        {
            // BUG FIX: the offset was computed into an `unsigned`, making
            // the `column < 0` test dead code (out-of-range negatives were
            // only caught by accident via wrap-around). Compute it signed
            // so both bounds checks are meaningful.
            int column = ctePars->iz_data[i] - ctePars->razColumnOffset; //which column to scale
            if (column < 0 || (unsigned)column >= nColumns)//vec blocker
                continue;
            trapColumnScale[0] = ctePars->scale512[i];
            trapColumnScale[1] = ctePars->scale1024[i];
            trapColumnScale[2] = ctePars->scale1536[i];
            trapColumnScale[3] = ctePars->scale2048[i];
            /*CALCULATE THE CTE CORRECTION FOR EVERY PIXEL
              Index is figured on the final size of the image
              not the current size.
              NOTE(review): the 512/2048 constants assume a 2048-row raz
              image quadrant layout — confirm for other detector formats. */
            {unsigned j;
            for (j = 0; j < nRows; ++j)
            {
                ro = j / 512.0; //ro can be zero, it's an index
                if (ro > 2.999)
                    ro = 2.999; // only 4 quads, 0 to 3
                else if (ro < 0)
                    ro = 0;
                io = (int) floor(ro); //force truncation towards 0 for pos numbers
                cte_j = (j+1) / 2048.0;
                // Linear interpolation between the bracketing row anchors.
                cte_i = trapColumnScale[io] + (trapColumnScale[io+1] - trapColumnScale[io]) * (ro - io);
                PixColumnMajor(trapPixelMap->sci.data, j, column) = cte_i * cte_j * cteScale;
            }}
        }}
    } // end parallel block

    if (ctePars->verbose)
    {
        // NOTE(review): clock() measures CPU time summed over threads;
        // dividing by maxThreads only approximates wall time.
        double timeSpent = ((double)(clock() - begin))/CLOCKS_PER_SEC;
        sprintf(MsgText,"(pctecorr) Time taken to populate pixel trap map image: %.2f(s) with %i threads",timeSpent/ctePars->maxThreads, ctePars->maxThreads);
        trlmessage(MsgText);
    }

    return(status);
}
int cteSmoothImage(const SingleGroup * input, SingleGroup * output, CTEParamsFast * ctePars, double ampReadNoise)
{
    /*
       This routine will output the smoothest
       image that is consistent with being the observed image plus readnoise.
       This is necessary because we want the CTE-correction algorithm to produce the smoothest
       possible reconstruction, consistent with the original image and the
       known readnoise. This algorithm constructs a model that is smooth
       where the pixel-to-pixel variations can be thought of as being related
       to readnoise, but if the variations are too large, then it respects
       the pixel values. Basically... it uses a 2-sigma threshold.

       This is strategy #1 in a two-pronged strategy to mitigate the readnoise
       amplification. Strategy #2 will be to not iterate when the deblurring
       is less than the readnoise.

       Returns the (extern) status code; ALLOCATION_PROBLEM on NULL inputs.
    */
    extern int status;

    if (!input || !output || !ctePars)
        return (status = ALLOCATION_PROBLEM);

    //WARNING - assumes column major storage order
    assert(input->sci.data.storageOrder == COLUMNMAJOR);
    output->sci.data.storageOrder = COLUMNMAJOR;

    // CLEANUP: a second, redundant `extern int status;` declaration that
    // appeared here was removed (the one above already covers this scope).

    const unsigned nRows = input->sci.data.ny;
    const unsigned nColumns = input->sci.data.nx;
    double rmsGlobal=0;
    double nrmsGlobal=0;
    clock_t begin = clock();

    copySingleGroup(output, input, input->sci.data.storageOrder);

    //Is the readnoise diff per amp? Current method assumes not.
    if (ampReadNoise < 0.1){
        trlmessage("rnsig < 0.1, No read-noise mitigation needed");
        return(status);
    }

    /*GO THROUGH THE ENTIRE IMAGE AND ADJUST PIXELS TO MAKE THEM
      SMOOTHER, BUT NOT SO MUCH THAT IT IS NOT CONSISTENT WITH
      READNOISE.  DO THIS IN BABY STEPS SO THAT EACH ITERATION
      DOES VERY LITTLE ADJUSTMENT AND INFORMATION CAN GET PROPAGATED
      DOWN THE LINE.
    */
    //To remove the below code adjust in place i.e. using only output:
    //Don't use pointers to output for obs_loc & rsz_loc
    //Copy columns and then just shift these copies (boundary case might be annoying)
    //Use schedule(static) and pre (inner for loop) copy boundary columns to avoid race conditions
    SingleGroup adjustment;
    initSingleGroup(&adjustment);
    allocSingleGroup(&adjustment, nColumns, nRows, False);

    SingleGroup readNoise;
    initSingleGroup(&readNoise);
    allocSingleGroup(&readNoise, nColumns, nRows, False);

#ifdef _OPENMP
    #pragma omp parallel shared(input, output, ampReadNoise, rmsGlobal, nrmsGlobal, readNoise)
#endif
    {
        const float * obs_loc[3];
        const float * rsz_loc[3];

        double rmsLocal;
        double nrmsLocal;
        {unsigned iter;
        for (iter = 0; iter < 100; ++iter)
        {
            rmsLocal = 0;
            nrmsLocal = 0;

            // Phase 1: compute, for every pixel, the smoothing adjustment
            // from its column and the two neighboring columns.
            {unsigned i;
#ifdef _OPENMP
            #pragma omp for schedule(static)
#endif
            for (i = 0; i < nColumns; ++i)
            {
                unsigned imid = i;
                /*RESET TO MIDDLE nColumns AT ENDPOINTS*/
                // This seems odd, the edge columns get accounted for twice?
                if (i == 0)
                    imid = 1;
                else if (i == nColumns-1) // NOTE: use of elseif breaks if nColumns = 1
                    imid = nColumns-2;

                /*LOCATE THE MIDDLE AND NEIGHBORING PIXELS FOR ANALYSIS*/
                obs_loc[0] = input->sci.data.data + (imid-1)*nRows;
                obs_loc[1] = obs_loc[0] + nRows;
                obs_loc[2] = obs_loc[1] + nRows;

                rsz_loc[0] = output->sci.data.data + (imid-1)*nRows;
                rsz_loc[1] = rsz_loc[0] + nRows;
                rsz_loc[2] = rsz_loc[1] + nRows;

                {unsigned j;
                for (j = 0; j < nRows; ++j)
                    PixColumnMajor(adjustment.sci.data, j, i) = find_dadjFast(1+i-imid, j, nRows, obs_loc, rsz_loc, ampReadNoise);
                }
            }} /*end the parallel for*/ //implicit omp barrier

            //NOW GO OVER ALL THE nColumns AND nRows AGAIN TO SCALE THE PIXELS
            // Phase 2: apply 75% of each adjustment and track the accumulated
            // read-noise image (input minus smoothed output).
            {unsigned i;
#ifdef _OPENMP
            #pragma omp for schedule(static)
#endif
            for (i = 0; i < nColumns; ++i)
            {
                {unsigned j;
                for(j = 0; j < nRows; ++j)
                {
                    PixColumnMajor(output->sci.data,j,i) += (PixColumnMajor(adjustment.sci.data,j, i)*0.75);
                    PixColumnMajor(readNoise.sci.data,j,i) = (PixColumnMajor(input->sci.data,j,i) - PixColumnMajor(output->sci.data,j,i));
                }}
            }}//implicit omp barrier

#ifdef _OPENMP
            #pragma omp single
#endif
            {
                rmsGlobal=0;
                nrmsGlobal=0;
            }

            // Phase 3: RMS of the read-noise image over non-trivial pixels.
            {unsigned j;
#ifdef _OPENMP
            #pragma omp for schedule(static)
#endif
            for (j = 0; j < nColumns; ++j)
            {
                {unsigned i;
                for (i = 0; i < nRows; ++i)
                {
                    if ( (fabs(PixColumnMajor(input->sci.data, i, j)) > 0.1 ||
                         fabs(PixColumnMajor(output->sci.data, i, j)) > 0.1))
                    {
                        double tmp = PixColumnMajor(readNoise.sci.data, i, j);
                        rmsLocal  += tmp*tmp;
                        ++nrmsLocal;
                    }
                }}
            }}//implicit omp barrier

#ifdef _OPENMP
            #pragma omp critical (aggregate)
#endif
            {
                rmsGlobal  += rmsLocal;
                nrmsGlobal += nrmsLocal;
            }
#ifdef _OPENMP
            #pragma omp barrier
#endif

#ifdef _OPENMP
            #pragma omp single
#endif
            {
                rmsGlobal = sqrt(rmsGlobal/nrmsGlobal);
            } //implicit barrier

            // Stop once the accumulated RMS reaches the amp read noise.
            // if it is true that one breaks then it is true for all
            /*epsilon type comparison*/
            if ((ampReadNoise - rmsGlobal) < 0.00001)
                break; // this exits loop over iter

#ifdef _OPENMP
            #pragma omp barrier
#endif
        }} // end loop over iter
    } // close parallel block

    freeSingleGroup(&adjustment);
    freeSingleGroup(&readNoise);

    if (ctePars->verbose)
    {
        // NOTE(review): clock() sums CPU time across threads; dividing by
        // maxThreads only approximates wall time.
        double timeSpent = ((double)(clock() - begin))/CLOCKS_PER_SEC;
        sprintf(MsgText,"(pctecorr) Time taken to smooth image: %.2f(s) with %i threads", timeSpent/ctePars->maxThreads, ctePars->maxThreads);
        trlmessage(MsgText);
    }

    return (status);
}
/* Local helper: residual (observed - smoothed model) at column/row. */
static double pixResidual(const float * obs[3], const float * rsz[3], const unsigned col, const unsigned row)
{
    return (double)*(obs[col] + row) - (double)*(rsz[col] + row);
}

/* Local helper: clamp a value into [-limit, +limit]. */
static double clampMagnitude(const double value, const double limit)
{
    if (value > limit)
        return limit;
    if (value < -limit)
        return -limit;
    return value;
}

double find_dadjFast(const unsigned i, const unsigned j, const unsigned nRows, const float * obsloc[3], const float * rszloc[3], const double readNoiseAmp)
{
    /*
       This function determines for a given pixel how it can
       adjust in a way that is not inconsistent with its being
       readnoise.  To do this, it looks at its upper and lower
       neighbors and sees whether it is consistent with either
       (modulo readnoise).  To the extent that it is consistent
       then move it towards them.  But also bear in mind that
       we don't want it to be more than 2 RN sigmas away
       from its original value.  This is pretty much a tug of
       war... with readnoise considerations pushing pixels to
       be closer to their neighbors, but the original pixel
       values also pull to keep the pixel where it was.  Some
       accommodation is made for both considerations.
    */
    //For performance this does not NULL check passed in ptrs
    const double modelVal = (double)*(rszloc[i] + j);

    /* Pull toward the observed pixel value, capped at +/- 1 e-. */
    const double dObs = (double)*(obsloc[i] + j) - modelVal;
    const double dObsCapped = clampMagnitude(dObs, 1.0);

    /* Mean residual over the 3x3 neighborhood (interior pixels only;
       otherwise it stays 0 and contributes nothing). */
    double dMean = 0.0;
    if (i == 1 && j < nRows-1 && j > 0)
    {
        dMean = pixResidual(obsloc, rszloc, i,   j-1) +
                pixResidual(obsloc, rszloc, i,   j)   +
                pixResidual(obsloc, rszloc, i,   j+1) +
                pixResidual(obsloc, rszloc, i-1, j-1) +
                pixResidual(obsloc, rszloc, i-1, j)   +
                pixResidual(obsloc, rszloc, i-1, j+1) +
                pixResidual(obsloc, rszloc, i+1, j-1) +
                pixResidual(obsloc, rszloc, i+1, j)   +
                pixResidual(obsloc, rszloc, i+1, j+1);
        dMean = dMean / 9.0;
    }

    /* Neighbor pulls are capped at a fixed fraction of the read noise. */
    const double readNoiseAmpFraction = 0.33;
    const double noiseCap = readNoiseAmp * readNoiseAmpFraction;
    const double dMeanCapped = clampMagnitude(dMean, noiseCap);

    /* Differences to the vertical neighbors within the same column. */
    const double dBelow = j > 0 ? (double)*(rszloc[i] + j-1) - modelVal : 0;
    const double dAbove = j < nRows-1 ? (double)*(rszloc[i] + j+1) - modelVal : 0;
    const double dBelowCapped = clampMagnitude(dBelow, noiseCap);
    const double dAboveCapped = clampMagnitude(dAbove, noiseCap);

    /*
       IF IT'S WITHIN 2 SIGMA OF THE READNOISE, THEN
       TEND TO TREAT AS READNOISE; IF IT'S FARTHER OFF
       THAN THAT, THEN DOWNWEIGHT THE INFLUENCE
    */
    const double rn2 = readNoiseAmp * readNoiseAmp;
    const double wObs   = dObs * dObs / (dObs * dObs + 4.0 * rn2);
    const double wMean  = dMean * dMean / (dMean * dMean + 18.0 * rn2);
    const double wBelow = 4 * rn2 / (dBelow * dBelow + 4.0 * rn2);
    const double wAbove = 4 * rn2 / (dAbove * dAbove + 4.0 * rn2);

    /*(note that with the last two, if a pixel
      is too discordant with its upper or lower
      that neighbor has less of an ability to
      pull it)*/
    return dObsCapped  * wObs   * 0.25 +  /* desire to keep the original pixel value */
           dMeanCapped * wMean  * 0.25 +  /* desire to keep the original sum over 3x3 */
           dBelowCapped * wBelow * 0.25 + /* desire to get closer to the pixel below */
           dAboveCapped * wAbove * 0.25;  /* desire to get closer to the pixel above */
}
|
transform_functions_fp32.h | // Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef CHEETAH_X86_TRANSFORM_FUNTIONS_FP32_H
#define CHEETAH_X86_TRANSFORM_FUNTIONS_FP32_H
#include "thread_affinity.h"
// Repack one group of N consecutive NCHW filters into the blocked CxNx
// layout: channels are walked in groups of at most C, and for each spatial
// offset hw the group's weights for all N filters are stored contiguously
// (N floats per channel), gathered across filters with stride fc*fh*fw.
// fc/fh/fw: filter channels and kernel height/width.
// oc: padded channel count of the destination (assumed >= fc; the pad
//     channels of each group are zero-filled — TODO confirm oc >= fc is
//     guaranteed by callers, otherwise the memset size underflows).
// N must be a multiple of 8 in {8,16,24,32} (AVX2 8-lane gathers).
template <U32 C, U32 N>
inline void transformNCHWCxNx(U32 fc, U32 fh, U32 fw, U32 oc, const F32 *input, F32 *output)
{
    F32 *dest = nullptr;
    const F32 *src;
    U32 cSize = 0, cSizePadding = 0;
    U32 lstep = fc * fh * fw;  // distance between the same weight in adjacent filters
    // Gather indices: one float from each of 8 consecutive filters.
    __m256i vindex = _mm256_set_epi32(
        lstep * 7, lstep * 6, lstep * 5, lstep * 4, lstep * 3, lstep * 2, lstep, 0);
    for (U32 c = 0; c < fc; c += cSize) {
        cSize = UNI_MIN(fc - c, C);         // real channels in this group
        cSizePadding = UNI_MIN(oc - c, C);  // group width incl. zero padding
        for (U32 hw = 0; hw < fh * fw; ++hw) {
            for (U32 c8 = 0; c8 < cSize; ++c8) {
                src = input + (c + c8) * fh * fw + hw;
                dest = output + c * fh * fw * N + hw * cSizePadding * N + c8 * N;
                // Each 8-lane gather pulls this weight from 8 filters.
                if (N >= 8) {
                    _mm256_storeu_ps(dest, _mm256_i32gather_ps(src, vindex, 4));
                }
                if (N >= 16) {
                    _mm256_storeu_ps(dest + 8, _mm256_i32gather_ps(src + 8 * lstep, vindex, 4));
                }
                if (N >= 24) {
                    _mm256_storeu_ps(dest + 16, _mm256_i32gather_ps(src + 16 * lstep, vindex, 4));
                }
                if (N == 32) {
                    _mm256_storeu_ps(dest + 24, _mm256_i32gather_ps(src + 24 * lstep, vindex, 4));
                }
            }
            // Zero the pad channels (cSize..cSizePadding) of this hw slot;
            // dest still points at the last written channel's N-block.
            memset(dest + N, 0, ((cSizePadding - cSize) * N * 4));
        }
    }
}
// N is 32/24
// Repack fn NCHW filters into the blocked NCHWCxNx layout, N filters at a
// time, then handle the tail with progressively smaller block widths
// (24, 16, 8) and finally a masked gather for a tail of fewer than 8.
// Returns NULL_POINTER on null input/output, SUCCESS otherwise.
template <U32 C, U32 N>
inline EE transformNCHWToNCHWCxNx(
    TensorDesc inputDesc, const F32 *input, TensorDesc outputDesc, F32 *output)
{
    if (input == NULL || output == NULL) {
        CHECK_STATUS(NULL_POINTER);
    }
    DataType fdt, odt;
    DataFormat fdf, odf;
    U32 fn, fc, fh, fw;
    U32 on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &fdt, &fdf, &fn, &fc, &fh, &fw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    // Split the filter count into full N-blocks plus a remainder.
    U32 remain = fn % N;
    fn -= remain;
    for (U32 n = 0; n < fn; n += N) {
        transformNCHWCxNx<C, N>(fc, fh, fw, oc, input, output);
        input += fc * fh * fw * N;
        output += oc * fh * fw * N;
    }
    if (remain >= 24) {
        transformNCHWCxNx<C, 24>(fc, fh, fw, oc, input, output);
        input += fc * fh * fw * 24;
        output += oc * fh * fw * 24;
        remain -= 24;
    }
    if (remain >= 16) {
        transformNCHWCxNx<C, 16>(fc, fh, fw, oc, input, output);
        input += fc * fh * fw * 16;
        output += oc * fh * fw * 16;
        remain -= 16;
    }
    if (remain >= 8) {
        transformNCHWCxNx<C, 8>(fc, fh, fw, oc, input, output);
        input += fc * fh * fw * 8;
        output += oc * fh * fw * 8;
        remain -= 8;
    }
    if (remain > 0) {
        // Fewer than 8 filters left: gather with a lane mask so that only
        // the first `remain` lanes read from input; masked-off lanes take
        // the zero value from src256.
        F32 *dest = NULL;
        U32 cSize = 0, cSizePadding = 0;
        F32 m[8] = {0.0f};
        for (U32 i = 0; i < remain; ++i) {
            m[i] = -1.0f;  // sign bit set => lane enabled for the gather
        }
        __m256 mask = _mm256_set_ps(m[7], m[6], m[5], m[4], m[3], m[2], m[1], m[0]);
        U32 lstep = fc * fh * fw;
        __m256i vindex = _mm256_set_epi32(
            lstep * 7, lstep * 6, lstep * 5, lstep * 4, lstep * 3, lstep * 2, lstep, 0);
        __m256 src256 = _mm256_setzero_ps();
        for (U32 c = 0; c < fc; c += cSize) {
            cSize = UNI_MIN(fc - c, C);
            cSizePadding = UNI_MIN(oc - c, C);
            for (U32 hw = 0; hw < fh * fw; ++hw) {
                for (U32 c8 = 0; c8 < cSize; ++c8) {
                    const F32 *src = input + (c + c8) * fh * fw + hw;
                    dest = output + c * fh * fw * 8 + hw * cSizePadding * 8 + c8 * 8;
                    _mm256_storeu_ps(dest, _mm256_mask_i32gather_ps(src256, src, vindex, mask, 4));
                }
                // Zero the padded channels of this hw slot (8 floats each).
                memset(dest + 8, 0, ((cSizePadding - cSize) * 32));
            }
        }
        // CLEANUP: removed the dead store `fn += remain;` — fn is a local
        // copy of the descriptor value and is never read again.
    }
    return SUCCESS;
}
// Copy an NCHWC8 tensor from `data` into `tmp`, adding zero padding of
// convParamSpec.padding_{top,bottom,left,right} around each HxW plane.
// `tmp` must be large enough for ic/8 planes of (ih+padT+padB) x
// (iw+padL+padR) x 8 floats. Requires DF_NCHWC8 input with ic % 8 == 0.
// NOTE(review): assumes bytesOf(idt) == sizeof(F32) for the memcpy/memset
// arithmetic to line up with the F32* pointers — confirm idt is FP32 here.
inline void PaddingNCHWC8(
    F32 *data, F32 *tmp, TensorDesc inputDesc, ConvolutionParamSpec convParamSpec)
{
    // NCHWC8
    DataType idt;
    DataFormat idf;
    U32 in, ic, ih, iw;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    U32 paddingT = convParamSpec.padding_top;
    U32 paddingB = convParamSpec.padding_bottom;
    U32 paddingL = convParamSpec.padding_left;
    U32 paddingR = convParamSpec.padding_right;
    U32 padih = paddingT + paddingB + ih;
    U32 padiw = paddingL + paddingR + iw;
    CHECK_REQUIREMENT((idf == DF_NCHWC8) && (ic % 8 == 0));
    ic /= 8;  // iterate over channel groups of 8
#ifdef _USE_OPENMP
#pragma omp parallel num_threads(OMP_NUM_THREADS)
    {
#endif
    // Pass 1: zero the full top/bottom padding rows of each channel group.
#ifdef _USE_OPENMP
#pragma omp for schedule(static)
#endif
        for (U32 c = 0; c < ic; ++c) {
            U32 coff = c * padih * padiw * 8;
            memset(tmp + coff, 0, padiw * paddingT * 8 * bytesOf(idt));
            memset(tmp + coff + (ih + paddingT) * padiw * 8, 0, padiw * paddingB * 8 * bytesOf(idt));
        }
    // Pass 2: for each interior row, zero the left pad, copy the row,
    // zero the right pad. The h*c loop is flattened so omp can split it.
#ifdef _USE_OPENMP
#pragma omp for schedule(static)
#endif
        for (U32 hc = 0; hc < ih * ic; ++hc) {
            U32 c = hc / ih;
            U32 coff = c * padih * padiw * 8;
            U32 h = hc % ih;
            U32 hoff = (h + paddingT) * padiw;
            memset(tmp + coff + hoff * 8, 0, paddingL * 8 * bytesOf(idt));
            memcpy(tmp + coff + (hoff + paddingL) * 8, data + c * ih * iw * 8 + h * iw * 8,
                iw * 8 * bytesOf(idt));
            memset(tmp + coff + (hoff + (paddingL + iw)) * 8, 0, paddingR * 8 * bytesOf(idt));
        }
#ifdef _USE_OPENMP
    }
#endif
}
// Scatter-add per-position deconvolution partial products into the output
// (NCHWC8) and crop away the padding border. The input holds, for each
// input position (kh,kw), oc channel values per kernel tap laid out as
// [(kh*iw+kw)*oc + kc][fh*fw]; the 8-lane gather (stride fhfw) pulls one
// tap's value for 8 consecutive channels and accumulates it into the
// overlapping output window at (kh*stride+jh, kw*stride+jw), shifted by
// the top/left padding.
// NOTE(review): oc is assumed to be a multiple of 8 (C8 layout) — confirm.
inline void deconvOverlapAndCrop(F32 *input,
    F32 *output,
    TensorDesc inputDesc,
    TensorDesc outputDesc,
    ConvolutionParamSpec convParamSpec)
{
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    U32 fh = convParamSpec.kernel_h;
    U32 fw = convParamSpec.kernel_w;
    U32 fhfw = fh * fw;
    U32 strideH = convParamSpec.stride_h;
    U32 strideW = convParamSpec.stride_w;
    U32 paddingT = convParamSpec.padding_top;
    U32 paddingL = convParamSpec.padding_left;
    // Gather one tap value from 8 consecutive channels (stride fhfw floats).
    __m256i vindex =
        _mm256_set_epi32(fhfw * 7, fhfw * 6, fhfw * 5, fhfw * 4, fhfw * 3, fhfw * 2, fhfw, 0);
    for (U32 kn = 0; kn < in; ++kn) {
        for (U32 kh = 0; kh < ih; ++kh) {
            for (U32 kw = 0; kw < iw; ++kw) {
                for (U32 kc = 0; kc < oc; kc += 8) {
                    for (U32 jh = 0; jh < fh; ++jh) {
                        for (U32 jw = 0; jw < fw; ++jw) {
                            // Destination coordinates before crop; skip taps
                            // that land in the padding border.
                            U32 ohIdx = kh * strideH + jh;
                            U32 owIdx = kw * strideW + jw;
                            if ((ohIdx < paddingT) || (ohIdx >= oh + paddingT) ||
                                (owIdx < paddingL) || (owIdx >= ow + paddingL)) {
                                continue;
                            }
                            ohIdx -= paddingT;
                            owIdx -= paddingL;
                            U32 oidx = (kc * oh + ohIdx * 8) * ow + owIdx * 8;
                            U32 iidx = ((kh * iw + kw) * oc + kc) * fhfw + jh * fw + jw;
                            __m256 x = _mm256_i32gather_ps(input + iidx, vindex, 4);
                            // Accumulate: overlapping windows add up.
                            x = _mm256_add_ps(x, _mm256_loadu_ps(output + oidx));
                            _mm256_storeu_ps(output + oidx, x);
                        }
                    }
                }
            }
        }
        // Advance to the next image in the batch.
        input += ic * ih * iw;
        output += oc * oh * ow;
    }
}
// Scatter-add deconvolution partial products already in NCHWC8 layout into
// the output and crop the padding border. Unlike deconvOverlapAndCrop, the
// input stores each kernel tap as a full C8 image plane
// ([(jh*fw+jw)*oc + kc][ih*iw] with 8 channels packed innermost), so plain
// 8-float loads replace the gathers.
// NOTE(review): oc is assumed to be a multiple of 8 (C8 layout) — confirm.
inline void deconvOverlapAndCropNCHWC8(F32 *input,
    F32 *output,
    TensorDesc inputDesc,
    TensorDesc outputDesc,
    ConvolutionParamSpec convParamSpec)
{
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    U32 fh = convParamSpec.kernel_h;
    U32 fw = convParamSpec.kernel_w;
    // CLEANUP: removed unused local `fhfw` (the NCHWC8 indexing below does
    // not need the per-tap stride that the gather variant uses).
    U32 strideH = convParamSpec.stride_h;
    U32 strideW = convParamSpec.stride_w;
    U32 paddingT = convParamSpec.padding_top;
    U32 paddingL = convParamSpec.padding_left;
    for (U32 kn = 0; kn < in; ++kn) {
        for (U32 kh = 0; kh < ih; ++kh) {
            for (U32 kw = 0; kw < iw; ++kw) {
                for (U32 kc = 0; kc < oc; kc += 8) {
                    for (U32 jh = 0; jh < fh; ++jh) {
                        for (U32 jw = 0; jw < fw; ++jw) {
                            // Destination coordinates before crop; skip taps
                            // landing in the padding border.
                            U32 ohIdx = kh * strideH + jh;
                            U32 owIdx = kw * strideW + jw;
                            if ((ohIdx < paddingT) || (ohIdx >= oh + paddingT) ||
                                (owIdx < paddingL) || (owIdx >= ow + paddingL)) {
                                continue;
                            }
                            ohIdx -= paddingT;
                            owIdx -= paddingL;
                            U32 oidx = (kc * oh + ohIdx * 8) * ow + owIdx * 8;
                            U32 iidx = ((jh * fw + jw) * oc + kc) * ih * iw + kh * iw * 8 + kw * 8;
                            // Accumulate: overlapping windows add up.
                            _mm256_storeu_ps(output + oidx,
                                _mm256_add_ps(
                                    _mm256_loadu_ps(input + iidx), _mm256_loadu_ps(output + oidx)));
                        }
                    }
                }
            }
        }
        // Advance to the next image in the batch.
        input += ic * ih * iw;
        output += oc * oh * ow;
    }
}
// Variant of deconvOverlapAndCropNCHWC8 for the case where output windows
// do not overlap (the "Equal" name suggests stride == kernel size —
// TODO confirm), so taps can be written with plain stores and the kh loop
// can be parallelized safely. Writes (no accumulation) the NCHWC8 tap
// planes into the cropped output.
// NOTE(review): the `bias` parameter is currently unused in this path —
// presumably bias is applied elsewhere or reserved for future use; confirm.
inline void deconvOverlapAndCropEqualNCHWC8(F32 *input,
    F32 *output,
    const F32 *bias,
    TensorDesc inputDesc,
    TensorDesc outputDesc,
    ConvolutionParamSpec convParamSpec)
{
    (void)bias;  // see NOTE above; silences unused-parameter warnings
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    U32 fh = convParamSpec.kernel_h;
    U32 fw = convParamSpec.kernel_w;
    // CLEANUP: removed unused local `fhfw`.
    U32 strideH = convParamSpec.stride_h;
    U32 strideW = convParamSpec.stride_w;
    U32 paddingT = convParamSpec.padding_top;
    U32 paddingL = convParamSpec.padding_left;
    for (U32 kn = 0; kn < in; ++kn) {
        for (U32 kc = 0; kc < oc; kc += 8) {
#ifdef _USE_OPENMP
#pragma omp parallel for num_threads(OMP_NUM_THREADS) schedule(static)
#endif
            for (U32 kh = 0; kh < ih; ++kh) {
                for (U32 kw = 0; kw < iw; ++kw) {
                    for (U32 jh = 0; jh < fh; ++jh) {
                        for (U32 jw = 0; jw < fw; ++jw) {
                            // Destination coordinates before crop; skip taps
                            // landing in the padding border.
                            U32 ohIdx = kh * strideH + jh;
                            U32 owIdx = kw * strideW + jw;
                            if ((ohIdx < paddingT) || (ohIdx >= oh + paddingT) ||
                                (owIdx < paddingL) || (owIdx >= ow + paddingL)) {
                                continue;
                            }
                            ohIdx -= paddingT;
                            owIdx -= paddingL;
                            U32 oidx = (kc * oh + ohIdx * 8) * ow + owIdx * 8;
                            U32 iidx = ((jh * fw + jw) * oc + kc) * ih * iw + kh * iw * 8 + kw * 8;
                            _mm256_storeu_ps(output + oidx, _mm256_loadu_ps(input + iidx));
                        }
                    }
                }
            }
        }
        // Advance to the next image in the batch.
        input += ic * ih * iw;
        output += oc * oh * ow;
    }
}
#endif
|
main.c | /*
MIT License
Copyright (c) 2019 Novak Kaluđerović
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "gmp.h"
#include <time.h>
#include <inttypes.h>
#include <string.h>
#include "common.h"
#include <unistd.h>
/* Static parameters (should be read from a project file or input) */
static char* folder;          // -b: project directory/base name (required; checked in get_options)
static uint64_t const_N;      // N = 2^bits_N, length of the J symbol list (set in main)
static uint64_t const_k_N;    // floor((N-1)/(CONST_L-1)): extra symbols appended past N (set in main)
static int bits_N = 24;       // -n: number of bits of N
static int seed = 0;          // -s: randomness seed; 0 means "use time(NULL)"
static int num_threads = 6;   // -t: OpenMP thread count
static int read_data = 1;     // -r: 1 = read precomputed tables from disk, 0 = build them
/* Print the command-line help text to stderr and terminate with exit
   status 1. */
static void usage()
{
    static const char *const helpLines[] = {
        "precomp -b bname [ options ]\n",
        " -b bname: project file name\n",
        " -n bits: number of bits of N, default: 24\n",
        " -s seed: randomness seed, default: time(NULL)\n",
        " -t num_threads: number of threads. default: 6\n",
        " -r read_data: read precomputed data (1) or create it (0). default: 1\n",
        " -h help\n",
    };
    size_t k;
    for (k = 0; k < sizeof(helpLines) / sizeof(helpLines[0]); ++k)
        fputs(helpLines[k], stderr);
    exit(1);
}
/* Parse the command-line options into the file-scope configuration
   variables (folder, bits_N, seed, num_threads, read_data). Exits via
   usage() on -h, a malformed/unknown option, or a missing -b, and via
   complain() on unparsable numeric arguments. */
static void get_options(int argc, char **argv)
{
    /* BUG FIX: getopt() returns int; the original stored it in a plain
       char and compared against (char)(-1), which misbehaves depending on
       whether char is signed on the platform. POSIX prescribes an int. */
    int c;
    folder = NULL;
    while ((c = getopt(argc, argv, "b:n:s:t:r:h")) != -1)
    {
        switch (c)
        {
            case 'b':
                folder = optarg;
                break;
            case 'n':
                if (sscanf(optarg, "%d", &bits_N) != 1)
                    complain("Bad argument to -n!\n");
                break;
            case 's':
                if (sscanf(optarg, "%d", &seed) != 1)
                    complain("Bad argument to -s!\n");
                break;
            case 't':
                if (sscanf(optarg, "%d", &num_threads) != 1)
                    complain("Bad argument to -t!\n");
                break;
            case 'r':
                if (sscanf(optarg, "%d", &read_data) != 1)
                    complain("Bad argument to -r!\n");
                break;
            case 'h':
                usage();
                break;
            default:
                fprintf(stderr, "Bad option %c\n", (char)c);
                usage();
        }
    }
    if (folder == NULL)
    {
        fprintf(stderr, "argument -b bname is necessary\n");
        usage();
    }
}
/* Build a seq_len-bit word (seq_len <= 64) whose bit j encodes the
   Legendre symbol of (R + j) mod p: bit 0 for symbol +1 or 0, bit 1 for
   symbol -1. R and p are not modified. */
static inline uint64_t generate_sequence(mpz_t R, mpz_t p, uint16_t seq_len)
{
    assert(seq_len <= 64);
    mpz_t probe;
    mpz_init_set(probe, R);
    uint64_t bits = 0;
    uint16_t pos = 0;
    while (pos < seq_len)
    {
        /* Symbol -1 maps to a set bit; symbols 0 and +1 leave it clear. */
        if (mpz_legendre(probe, p) == -1)
            bits |= ((uint64_t)1 << pos);
        mpz_add_ui(probe, probe, 1);
        ++pos;
    }
    mpz_clear(probe);
    return bits;
}
/* Reconstruct the full 64-bit offset of address slot i from its truncated
   stored value. The positions table only keeps the low bits of each
   offset; the true offset is recovered by starting from the expected slot
   start (i * CONST_A / 2^ADDRESS_NUM_BITS) and adding multiples of
   2^ADDRESS_NUM_BITS to the stored value until it falls within half a
   window of the approximation.
   NOTE(review): assumes position_t stores enough low bits that exactly one
   candidate lands in the half-window — confirm against makepositions(). */
static inline uint64_t static_get_position(uint64_t i, position_t position)
{
    /* Expected start of slot i (slots are, on average, CONST_A/2^A apart). */
    int64_t appx_pos = floor( (((double)CONST_A)/((double)(1ULL << ADDRESS_NUM_BITS))) * ((int64_t)i) );
    int64_t pos = (int64_t)position;
    while (1)
    {
        if(labs(appx_pos - pos) < (1ULL << (ADDRESS_NUM_BITS - 1)))
            return ((uint64_t)pos);
        pos += (1ULL << ADDRESS_NUM_BITS);
    }
}
/* Binary-search the sorted array seqs[0..seqs_len-1] for val.
   Returns 0 if val is present, -1 otherwise. The first probe is at
   seqs_len/2 and subsequent probes at (lo+hi)/2, matching the companion
   binary_search64(). */
static inline int binary_search_t(sequence_t *seqs, sequence_t val, uint16_t seqs_len)
{
    int32_t lo = 0;
    int32_t hi = (int32_t)seqs_len - 1;
    int32_t probe = seqs_len >> 1;
    while (lo <= hi)
    {
        if (seqs[probe] == val)
            return 0;
        if (seqs[probe] < val)
            lo = probe + 1;
        else
            hi = probe - 1;
        probe = (lo + hi) >> 1;
    }
    return -1;
}
/* Binary-search the sorted J-hit table for the sequence value val.
   On a hit, writes the stored (i, d) identifiers of the matching entry
   through *ii and *dd and returns 0; returns -1 when absent. The probe
   order (first probe seqs_len/2, then (lo+hi)/2) is kept identical to the
   original so the same entry is reported for duplicate keys. */
static inline int binary_search64(sequence_J *seq_J, uint64_t val, uint64_t seqs_len, uint32_t *dd, uint32_t *ii)
{
    int32_t lo = 0;
    int32_t hi = (int32_t)seqs_len - 1;
    int32_t probe = seqs_len >> 1;
    while (lo <= hi)
    {
        if (seq_J[probe].seq == val)
        {
            *ii = seq_J[probe].i;
            *dd = seq_J[probe].d;
            return 0;
        }
        if (seq_J[probe].seq < val)
            lo = probe + 1;
        else
            hi = probe - 1;
        probe = (lo + hi) >> 1;
    }
    return -1;
}
/* Extract a seq_len-bit word (seq_len <= 64) from the packed bit array
   k_symbols, sampling bits at indices i, i+d, ..., i+(seq_len-1)*d (bits
   are stored MSB-first within each byte). Bit j of the result holds the
   j-th sample. When negate_result is nonzero the whole word is
   complemented (used when the Legendre symbol of the stride d is -1). */
static inline uint64_t static_get_sequence_from_id(unsigned char *k_symbols, uint32_t i, uint32_t d, uint16_t seq_len, unsigned char negate_result)
{
    assert(seq_len <= 64);
    uint64_t result = 0;
    for (uint16_t j = 0; j < seq_len; ++j)
    {
        const uint32_t bitIndex = i + (uint32_t)j * d;
        const unsigned char symbol = (k_symbols[bitIndex >> 3] >> (7 - (bitIndex & 0x7))) & 1;
        result |= ((uint64_t)symbol << j);
    }
    return negate_result ? (result ^ MASK(seq_len, 0)) : result;
}
/* Extract two seq_len-bit subsequences (seq_len <= 64) from the packed bit
   array J_list, sampled at indices i, i+d, ..., i+(seq_len-1)*d (bits
   stored MSB-first within each byte).
   *seq receives the forward word (sample j at bit j); *seq_minus receives
   the reversed word (sample j at bit seq_len-1-j). Bits are OR-ed in, so
   callers must pre-initialize *seq and *seq_minus (typically to 0).
   negate_result complements the forward word (stride symbol -1);
   symbol_minus1 XOR negate_result decides whether the reversed word is
   complemented (accounts for the symbol of -1 mod p on negated points). */
static inline void static_get_two_sequences_from_id(unsigned char *J_list, uint32_t i, uint32_t d, uint16_t seq_len, unsigned char negate_result, uint64_t *seq, uint64_t *seq_minus, unsigned char symbol_minus1)
{
    assert(seq_len <= 64);
    for (int j = 0; j < seq_len; j++)
    {
        uint64_t idx = i + j*d;
        uint64_t byte_idx = idx >> 3;
        uint64_t bit_idx = 7 - (idx & 0x7);  // bits are MSB-first per byte
        unsigned char bit = (J_list[byte_idx] >> bit_idx) & 1;
        *seq |= ((uint64_t)bit << j);
        *seq_minus |= ((uint64_t)bit << (seq_len - 1 - j));
    }
    if (negate_result)
        *seq ^= MASK(seq_len, 0);
    if (symbol_minus1^negate_result)
        *seq_minus ^= MASK(seq_len, 0);
}
/* Test whether the bit indexed by the low BITMAP_NUM_BITS bits of seq is
   set in the packed bitmap. Returns 1 on a (possible) hit, 0 otherwise;
   callers follow up a hit with an exact search. */
static inline unsigned char bitmap_hit(unsigned char *bitmap, uint64_t seq)
{
    const uint64_t bitPos = seq & MASK(BITMAP_NUM_BITS, 0);
    return (unsigned char)((bitmap[bitPos >> 3] >> (bitPos & 0x7)) & 1);
}
/* Verify a candidate key: regenerate the NUM_TESTS Legendre-symbol chunks
   starting at the key (each TEST_BIT_CHUNKS symbols long, walked from the
   last chunk backwards) and compare them against the reference values in
   tests[]. Returns 1 when all chunks match, 0 otherwise. key and p are not
   modified. */
static inline int is_the_key_correct(mpz_t key, mpz_t p, uint64_t *tests)
{
    mpz_t tmp;
    mpz_init_set(tmp, key);
    /* Position tmp at the start of the last test chunk. */
    mpz_add_ui(tmp, tmp, (NUM_TESTS - 1) * TEST_BIT_CHUNKS);
    for (int i = NUM_TESTS - 1; i >= 0; i--)
    {
        uint64_t seq = generate_sequence(tmp, p, TEST_BIT_CHUNKS);
        if (seq != tests[i])
        {
            /* BUG FIX: the original returned here without mpz_clear(tmp),
               leaking a GMP integer on every rejected candidate. */
            mpz_clear(tmp);
            return 0;
        }
        mpz_sub_ui(tmp, tmp, TEST_BIT_CHUNKS);
    }
    mpz_clear(tmp);
    return 1;
}
int main(int argc, char* argv[])
{
char ppath[200], stringspath[200], occurrencespath[200], positionspath[200], bitmappath[200], allseqspath[200], jspath[200];
struct timespec start, end, parallel_end, parallel_start;
get_options(argc, argv);
//Get options and set initial parameters
omp_set_num_threads(num_threads); //number of threads
const_N = (1ULL << bits_N); //length of J_list
const_k_N = (uint64_t)floor((double)(const_N - 1)/((double)(CONST_L - 1)));
if(seed==0) seed=time(NULL);
sprintf(ppath, "%s/p", folder);
sprintf(stringspath, "%s/%s.bin", folder, folder);
sprintf(positionspath, "%s/positionsL%dA%d", folder, CONST_L, ADDRESS_NUM_BITS);
sprintf(bitmappath, "%s/bitmapL%dB%d", folder, CONST_L, BITMAP_NUM_BITS);
sprintf(allseqspath, "%s/allseqsL%dA%d", folder, CONST_L, ADDRESS_NUM_BITS);
//DEFINE VARIABLES
mpz_t p_global;
unsigned char *k_symbols = (unsigned char*) malloc(CONST_M_NUM_BYTES * sizeof(unsigned char));
position_t *positions = (position_t*) malloc((OCC_LEN + 1) * sizeof(position_t));
unsigned char* bitmap = (unsigned char*) malloc(BITMAP_NUM_BYTES);
sequence_t *precomp_seqs = (sequence_t*) malloc(CONST_A * sizeof(sequence_t));
sequence_J *J_hits = (sequence_J*) calloc(MAX_J_HITS, sizeof(sequence_J));
// READ INPUT DATA AND MAKE AUXILIARY DATA (common.c)
readprime(ppath, p_global);
readsymbols(stringspath, k_symbols);
if(read_data)
{
readpositions(positionspath, positions);
readbitmap(bitmappath, bitmap);
readallseqs(allseqspath, precomp_seqs);
}
else
{
makepositions(k_symbols, positions, p_global);
make_allseqs_and_bitmap(precomp_seqs, k_symbols, bitmap, positions, p_global);
sortallseqs(precomp_seqs, positions, num_threads);
}
// Start randomness
srand(seed);
gmp_randstate_t main_rstate;
gmp_randinit_default(main_rstate);
gmp_randseed_ui(main_rstate, rand());
// WORK
int stop = 0;
int num_js = 0;
uint64_t tot_trials = 0;
uint64_t tot_hits = 0;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
while (!stop)
{
printf("\nStarting the J sequence...\n");
mpz_t rand_j;
mpz_init(rand_j);
mpz_urandomm(rand_j, main_rstate, p_global);
mpz_set_str(rand_j, "4533551310507856472906002", 10);
gmp_printf("J = %Zd\n", rand_j);
++num_js;
uint64_t num_trials = 0;
uint64_t num_hits = 0;
uint32_t curr_d = 0;
// COMPUTE LIST OF SYMBOLS [J, J+N-1]
unsigned char *J_list_g = (unsigned char*) malloc((const_N + const_k_N ) >> 3);
generate_long_list(J_list_g, rand_j, p_global, const_N + const_k_N);
clock_gettime(CLOCK_MONOTONIC_RAW, ¶llel_start);
#pragma omp parallel shared(curr_d, stop, num_hits) reduction(+: num_trials)
{
mpz_t p, J, tmp;
mpz_init_set(p, p_global);
mpz_init_set(J, rand_j);
mpz_init(tmp);
uint32_t num_ds;
uint32_t d, d_index;
uint64_t seq_J, hit_index;
// TIMING AND COUNTING
int my_id = omp_get_thread_num();
struct timespec thread_start, thread_end;
uint64_t thread_trials = 0;
// VARIABLES
unsigned char symb_minus1;
uint32_t *list_of_ds;
// FUNCTIONS (common.h)
symbolofmin1(p, &symb_minus1);
denominators(const_N, &list_of_ds, &num_ds);
// COMPUTE LIST OF SYMBOLS [J, J+N-1]
unsigned char *J_list = (unsigned char*) malloc((const_N + const_k_N) >> 3);
memcpy(J_list, J_list_g, (const_N + const_k_N)>>3);
int stoppriv = 0;
clock_gettime(CLOCK_MONOTONIC_RAW, &thread_start);
while (!stoppriv)
{
#pragma omp atomic capture
d_index = curr_d++;
if (d_index >= num_ds)
break;
else
d = list_of_ds[d_index];
// Compute the Legendre symbol of d
mpz_set_ui(tmp, d);
unsigned char d_symbol = (1 - mpz_legendre(tmp, p)) >> 1;
// Generate the sequences and check for collisions
for (uint32_t i = 0; i < d; i++)
{
#pragma omp atomic read
stoppriv = stop;
if (stoppriv)
break;
seq_J = static_get_sequence_from_id(J_list, i, d, CONST_L, d_symbol);
if(bitmap_hit(bitmap, seq_J))
{
uint64_t addr = seq_J & MASK(ADDRESS_NUM_BITS, 0);
sequence_t val = (sequence_t)((seq_J >> ADDRESS_NUM_BITS) & MASK(CONST_L - ADDRESS_NUM_BITS, 0));
uint64_t pos_addrplus1 = static_get_position(addr + 1, positions[addr + 1]);
uint64_t pos_addr = static_get_position(addr, positions[addr]);
if(binary_search_t(&precomp_seqs[pos_addr], val, pos_addrplus1 - pos_addr) == 0)
{
#pragma omp atomic capture
hit_index = num_hits++;
if(hit_index >= MAX_J_HITS)
stop = 1;
else
{
J_hits[hit_index].seq = seq_J;
J_hits[hit_index].d = d;
J_hits[hit_index].i = i;
}
}
}
thread_trials += 1;
}
}
#pragma omp atomic update
num_trials += thread_trials;
clock_gettime(CLOCK_MONOTONIC_RAW, &thread_end);
//printf("Thread %d finished %ld trials in %lf seconds\n", my_id, thread_trials, delta(&thread_end, &thread_start));
mpz_clear(tmp);
mpz_clear(J);
mpz_clear(p);
free(J_list);
free(list_of_ds);
}
clock_gettime(CLOCK_MONOTONIC_RAW, ¶llel_end);
if (num_hits == MAX_J_HITS) num_hits--;
printf("J done!\n");
printf("Number of trials = %" PRIu64 "\n", num_trials);
printf("Number of hits is %" PRIu64 "\n", num_hits);
printf("Finished the trials in %lf seconds\n", delta(¶llel_end, ¶llel_start));
printf("Now testing if hits are good...\n");
tot_trials += num_trials;
tot_hits += num_hits;
gmp_sprintf(jspath, "%s/Jlist%Zd", folder, rand_j);
// Sort J_hits
qsort(J_hits, num_hits, sizeof(sequence_J), compareJ_seq);
// Write rand_j and J_hits to a file
FILE *fp = fopen(jspath, "wb");
assert((fp != NULL) && "Error opening the J_hits output file");
fwrite((sequence_J *)J_hits, num_hits * sizeof(sequence_J), 1, fp);
fclose(fp);
stop = 0;
curr_d = 0;
clock_gettime(CLOCK_MONOTONIC_RAW, ¶llel_start);
if(num_hits)
#pragma omp parallel shared(curr_d, stop, k_symbols, num_hits)
{
uint32_t ii, d, dd;
mpz_t k, J, dinv, p, tmp;
mpz_inits(k, dinv, tmp, NULL);
mpz_init_set(J, rand_j);
mpz_init_set(p, p_global);
// TIMING AND COUNTING
int my_id = omp_get_thread_num();
struct timespec thread_start, thread_end;
uint64_t thread_trials = 0;
// VARIABLES
unsigned char symbol_minus1;
uint64_t tests[NUM_TESTS];
// FUNCTIONS (common.h)
symbolofmin1(p, &symbol_minus1);
maketests(k_symbols, tests);
int stoppriv = 0;
clock_gettime(CLOCK_MONOTONIC_RAW, &thread_start);
while(!stoppriv)
{
#pragma omp atomic capture
d = curr_d++;
if (d > CONST_k)
break;
mpz_set_ui(tmp, d);
unsigned char d_symbol = (1 - mpz_legendre(tmp, p)) >> 1;
for(uint32_t j = 0; j < d; j++)
{
#pragma omp atomic read
stoppriv = stop;
if (stoppriv)
break;
uint64_t seq = 0;
uint64_t seq_minus = 0;
for (uint32_t i = j; i < CONST_M-(CONST_L-1)*d; i+=d)
{
if (i == j)
static_get_two_sequences_from_id(k_symbols, i, d, CONST_L, d_symbol, &seq, &seq_minus, symbol_minus1);
else
{
uint32_t idx = i + (CONST_L - 1)*d;
uint32_t byte_idx = idx >> 3;
uint32_t bit_idx = 7 - (idx & 0x7);
unsigned char bit = (k_symbols[byte_idx] >> bit_idx) & 1;
bit ^= d_symbol;
seq = (seq >> 1) | ((uint64_t)bit << (CONST_L - 1));
seq = seq & MASK(CONST_L, 0);
bit ^= symbol_minus1;
seq_minus = (seq_minus << 1) | ((uint64_t)bit);
seq_minus = seq_minus & MASK(CONST_L, 0);
}
if(binary_search64(J_hits, seq, num_hits, &dd, &ii) == 0)
{
mpz_set_ui(dinv, dd);
mpz_invert(dinv, dinv, p);
mpz_set(k,J);
mpz_add_ui(k,k,ii);
mpz_mul(k,k,dinv);
mpz_mul_ui(k,k,d);
mpz_sub_ui(k,k,i);
mpz_mod(k,k,p);
if (is_the_key_correct(k, p, tests))
{
stop = 1;
gmp_printf("k = %Zd\n", k);
break;
}
}
if(binary_search64(J_hits, seq_minus, num_hits, &dd, &ii) == 0)
{
mpz_set_ui(dinv, dd);
mpz_invert(dinv, dinv, p);
mpz_set(k,J);
mpz_add_ui(k,k,ii);
mpz_mul(k,k,dinv);
mpz_mul_ui(k,k,d);
mpz_mul_si(k,k,-1);
mpz_sub_ui(k,k,(CONST_L-1)*(uint64_t)d);
mpz_sub_ui(k,k,i);
mpz_mod(k,k,p);
if (is_the_key_correct(k, p, tests))
{
stop = 1;
gmp_printf("k = %Zd\n", k);
break;
}
}
}
}
}
clock_gettime(CLOCK_MONOTONIC_RAW, &thread_end);
//printf("Thread %d finished testing hits in %lf seconds\n", my_id, delta(&thread_end, &thread_start));
mpz_clears(tmp, J, p, k, dinv, NULL);
}
clock_gettime(CLOCK_MONOTONIC_RAW, ¶llel_end);
printf("Finished testing hits in %lf seconds\n", delta(¶llel_end, ¶llel_start));
} // END WHILE J LOOP
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
printf("\n\nTime taken to solve is %lf seconds\n", delta(&end, &start));
printf("Total number of trials is %" PRIu64 "\n", tot_trials);
printf("Total number of hits is %" PRIu64 "\n", tot_hits);
printf("Total number of Js is %d\n", num_js);
free(k_symbols);
free(positions);
free(precomp_seqs);
free(bitmap);
return 0;
}
|
vect-simd-clone-15.c | /* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
#include "tree-vect.h"
#ifndef N
#define N 1024
#endif
int array[N];
/* SIMD clone target: 'b' is declared linear with step -3 (matching the
   -i * 3 argument passed from bar), notinbranch => only unmasked clones
   are generated; noinline keeps the call visible to the vectorizer.  */
#pragma omp declare simd linear(val(b):-3), notinbranch
__attribute__((noinline)) int
foo (int a, int b)
{
return a + b;
}
/* Calls foo from an OpenMP simd loop so the vectorizer can use the SIMD
   clone; noinline/noclone keep this function intact for the test.  */
__attribute__((noinline, noclone)) void
bar ()
{
int i;
#pragma omp simd
for (i = 0; i < N; ++i)
array[i] = foo (i >> 1, -i * 3);
}
int
main ()
{
int i;
/* Skip the test at runtime if the CPU lacks the required vector ISA.  */
check_vect ();
bar ();
/* Verify every element against the scalar reference computation.  */
for (i = 0; i < N; i++)
if (array[i] != ((i >> 1) + (-3 * i)))
abort ();
return 0;
}
|
normal_gap_process.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_NORMAL_GAP_PROCESS_H_INCLUDED )
#define KRATOS_NORMAL_GAP_PROCESS_H_INCLUDED
// System includes
// External includes
// Project includes
#include "processes/process.h"
#include "includes/model_part.h"
#include "processes/simple_mortar_mapper_process.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NormalGapProcess
* @ingroup ContactStructuralMechanicsApplication
* @brief This process computes the normal gap
* @author Vicente Mataix Ferrandiz
* @tparam TDim The dimension of work
* @tparam TNumNodes The number of nodes of the slave
* @tparam TNumNodesMaster The number of nodes of the master
*/
template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster = TNumNodes>
class KRATOS_API(CONTACT_STRUCTURAL_MECHANICS_APPLICATION) NormalGapProcess
    : public Process
{
public:
    ///@name Type Definitions
    ///@{

    /// The type of mapper considered
    typedef SimpleMortarMapperProcess<TDim, TNumNodes, Variable<array_1d<double, 3>>, TNumNodesMaster> MapperType;

    /// General type definitions
    typedef ModelPart::NodesContainerType NodesArrayType;

    /// The definition of zero tolerance
    static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();

    /// Pointer definition of NormalGapProcess
    KRATOS_CLASS_POINTER_DEFINITION( NormalGapProcess );

    ///@}
    ///@name Enum's
    ///@{

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief The constructor of the normal gap process uses the following inputs:
     * @param rMasterModelPart The master model part to be considered
     * @param rSlaveModelPart The slave model part to be considered
     * @param SearchOrientation The orientation of the search (inverted or not)
     */
    NormalGapProcess(
        ModelPart& rMasterModelPart,
        ModelPart& rSlaveModelPart,
        const bool SearchOrientation = true
        ) : mrMasterModelPart(rMasterModelPart),
            mrSlaveModelPart(rSlaveModelPart),
            mSearchOrientation(SearchOrientation)
    {
    }

    /// Destructor.
    virtual ~NormalGapProcess() = default;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Functor interface: forwards to Execute()
    void operator()()
    {
        Execute();
    }

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Execute method is used to execute the Process algorithms.
     */
    void Execute() override;

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /************************************ GET INFO *************************************/
    /***********************************************************************************/

    std::string Info() const override
    {
        return "NormalGapProcess";
    }

    /************************************ PRINT INFO ***********************************/
    /***********************************************************************************/

    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ModelPart& mrMasterModelPart;  /// The master model part to be considered
    ModelPart& mrSlaveModelPart;   /// The slave model part to be considered
    const bool mSearchOrientation; /// The orientation of the search (inverted or not)

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method switchs the flag of an array of nodes
     * @details Flips both the SLAVE and the MASTER flag on every node
     * @param rNodes The set of nodes where the flags are reset
     */
    static inline void SwitchFlagNodes(NodesArrayType& rNodes)
    {
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(rNodes.size()); ++i) {
            auto it_node = rNodes.begin() + i;
            it_node->Flip(SLAVE);
            it_node->Flip(MASTER);
        }
    }

    /**
     * @brief This method computes the normal gap
     * @param rNodes The set of nodes where the gap is computed
     */
    void ComputeNormalGap(NodesArrayType& rNodes);

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; // Class NormalGapProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/****************************** INPUT STREAM FUNCTION ******************************/
/***********************************************************************************/
// Input stream operator, declared for Kratos convention.
// NOTE(review): only declared here, no definition in this header — confirm
// one exists elsewhere before using it.
template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster>
inline std::istream& operator >> (std::istream& rIStream,
NormalGapProcess<TDim, TNumNodes, TNumNodesMaster>& rThis);
/***************************** OUTPUT STREAM FUNCTION ******************************/
/***********************************************************************************/
// Output stream operator, provided for Kratos convention.
// Note: it returns the stream unchanged and prints nothing about rThis.
template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster>
inline std::ostream& operator << (std::ostream& rOStream,
const NormalGapProcess<TDim, TNumNodes, TNumNodesMaster>& rThis)
{
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_NORMAL_GAP_PROCESS_H_INCLUDED defined
|
softmax_hcl_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include "softmax_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
#include <arm_neon.h>
/* Propagate the input tensor's shape to the output tensor whenever any of
 * the four dimensions differ; returns set_ir_tensor_shape's status, or 0
 * when the shapes already match. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    for (int d = 0; d < 4; d++)
    {
        if (input_tensor->dims[d] != output_tensor->dims[d])
            return set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num);
    }

    return 0;
}
/* Fast vectorized exponential approximation:
 * exp(x) ~= (1 + x / 2^10)^(2^10), evaluated as one fused multiply-add
 * followed by ten successive squarings (0.0009765625f == 1/1024). */
static inline float32x4_t vexpq10_f32(float32x4_t x)
{
    float32x4_t y = vmlaq_n_f32(vdupq_n_f32(1.0f), x, 0.0009765625f); // n = 10
    for (int s = 0; s < 10; ++s)
        y = vmulq_f32(y, y);
    return y;
}
/* Compute the running maximum over the softmax axis:
 *   array[i] = max(0, max_j input[j * in_size + i])   for i in [0, in_size)
 *
 * Bug fix: the vector path previously used vpmaxq_f32 (aarch64) and a
 * vrev64q_f32/vextq_f32 shuffle sequence (armv7), both of which compute
 * PAIRWISE maxima across lanes rather than lane-for-lane maxima, so the
 * per-position maxima disagreed with the scalar tail loop.  The
 * element-wise vmaxq_f32 matches the scalar semantics on both targets.
 *
 * NOTE(review): the memset(0) clamps each maximum at 0; for all-negative
 * slices the subtracted "max" is 0, which softmax shift-invariance
 * tolerates but which can underflow exp() — confirm this is intended. */
static void GetMaxArray(float* input, float* array, int in_size, int on_size, int num_thread)
{
    float* input_ptr = ( float* )input;
    float* array_ptr = ( float* )array;
    memset(array, 0, in_size * sizeof(float));

    for (int j = 0; j < on_size; j++)
    {
        /* Vectorized element-wise running maximum, 4 floats per step. */
        for (int i = 0; i < (in_size & -4); i += 4)
        {
            float32x4_t _p = vld1q_f32(array_ptr + i);
            float32x4_t _in = vld1q_f32(input_ptr + j * in_size + i);
            _p = vmaxq_f32(_p, _in);
            vst1q_f32(array_ptr + i, _p);
        }
        /* Scalar tail for the last in_size % 4 elements. */
        for (int i = in_size & ~3; i < in_size; i++)
        {
            if (array_ptr[i] < input_ptr[j * in_size + i])
                array_ptr[i] = input_ptr[j * in_size + i];
        }
    }
}
/* Given the per-position maxima in maxarray, compute the softmax
 * numerators exp(x - max) into output and the per-position denominators
 * into sum_array, then normalise output in place.  Data layout: on_size
 * rows of in_size contiguous floats; the reduction runs over the rows,
 * independently for each of the in_size inner positions. */
static void GetOutResult(float* input, float* output, float* maxarray, float* sum_array, int in_size, int on_size,
int num_thread)
{
float* input_ptr = ( float* )input;
float* output_ptr = ( float* )output;
float* maxarray_ptr = ( float* )maxarray;
float* sum_array_ptr = ( float* )sum_array;
memset(sum_array, 0x0, in_size * sizeof(float));
/* get the exp and the summary */
// #pragma omp parallel for num_threads(num_thread)
for (int j = 0; j < on_size; j++)
{
// #pragma omp parallel for num_threads(num_thread)
/* Vector body: fast (1 + x/1024)^1024 approximation of exp(). */
for (int i = 0; i < (in_size & -4); i += 4)
{
int index = j * in_size + i;
float32x4_t out = vexpq10_f32(vsubq_f32(vld1q_f32(input_ptr + index), vld1q_f32(maxarray_ptr + i)));
float32x4_t sum = vaddq_f32(vld1q_f32(sum_array_ptr + i), out);
vst1q_f32(output_ptr + index, out);
vst1q_f32(sum_array_ptr + i, sum);
}
/* Scalar tail uses libm's exact exp(). */
for (int i = in_size & ~3; i < in_size; i++)
{
int index = j * in_size + i;
output_ptr[index] = exp(input_ptr[index] - maxarray_ptr[i]);
sum_array_ptr[i] += output_ptr[index];
}
}
/*
for(int l = 0; l < in_size; l++)
{
int index = j * in_size + l;
output_ptr[index] = exp(input_ptr[index] - array_ptr[l]);
sum_array_ptr[l] += output_ptr[index];
}
*/
/* the final result */
for (int j = 0; j < on_size; j++)
for (int l = 0; l < in_size; l++)
{
int index = j * in_size + l;
output_ptr[index] /= sum_array_ptr[l];
}
}
/* No per-node state to allocate for softmax; always succeeds. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Nothing allocated in init_node, so nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* No preparation needed before run(); always succeeds. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Forward execution of softmax over the configured axis.  The tensor is
 * viewed as [out_size, on_size, in_size] around the axis; for each of the
 * out_size outer slices the max/exp/normalise passes run over the on_size
 * rows of in_size elements.  Returns 0 on success, -1 on allocation
 * failure. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct softmax_param* softmax_param = ( struct softmax_param* )ir_node->op.param_mem;

    int element_size = input_tensor->elem_size;

    /* NOTE(review): dims[] holds at most 4 entries; assumes dim_num <= 4 —
     * confirm against the layouts this op is registered for. */
    int dims[4];
    for (int i = 0; i < input_tensor->dim_num; i++)
    {
        dims[i] = input_tensor->dims[i];
    }

    int axis = softmax_param->axis;

    /* Collapse the dimensions before/after the softmax axis. */
    int out_size = 1;
    for (int i = 0; i < axis; i++)
    {
        out_size *= dims[i];
    }
    int in_size = 1;
    for (size_t i = axis + 1; i < input_tensor->dim_num; i++)
    {
        in_size *= dims[i];
    }
    int on_size = dims[axis];

    uint8_t* input = input_tensor->data;
    uint8_t* output = output_tensor->data;

    float* max_array = ( float* )malloc(in_size * sizeof(float));
    float* sum_array = ( float* )malloc(in_size * sizeof(float));
    if (max_array == NULL || sum_array == NULL)
    {
        /* free(NULL) is a no-op, so this is safe for partial failure. */
        free(max_array);
        free(sum_array);
        return -1;
    }

    int on_in_size = on_size * in_size;

    /* todo: the int8/uint8 path (element_size == 1) is not implemented;
     * score() restricts this op to FP32, so it is never selected for
     * quantized tensors.  (The old placeholder malloc/free pair for that
     * path was dead code and has been removed.) */
    for (int i = 0; i < out_size; i++)
    {
        /* get max, then exp/sum/normalise, for this outer slice */
        int img_base = i * on_in_size * element_size;
        GetMaxArray(( float* )(input + img_base), max_array, in_size, on_size, exec_graph->num_thread);
        GetOutResult(( float* )(input + img_base), ( float* )(output + img_base), max_array, sum_array, in_size,
                     on_size, exec_graph->num_thread);
    }

    free(max_array);
    free(sum_array);

    return 0;
}
/* Report how well this implementation handles the node: best score for
 * FP32 inputs, 0 (not supported) otherwise. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct tensor* in_tensor = get_ir_graph_tensor(exec_node->graph, exec_node->input_tensors[0]);

    /* todo support uint8 */
    return (in_tensor->data_type == TENGINE_DT_FP32) ? OPS_SCORE_BEST : 0;
}
/* Callback table for the NEON softmax implementation; postrun is unused. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = reshape,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register this NEON softmax implementation with the builtin op registry. */
int register_softmax_hcl_arm_op()
{
return register_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}
/* Remove this implementation from the builtin op registry. */
int unregister_softmax_hcl_arm_op()
{
return unregister_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}
|
tentusscher_epi_2004_S1_2.c | #include <assert.h>
#include <stdlib.h>
#include "tentusscher_epi_2004_S1_2.h"
// Fill the cell_model descriptor with this model's resting potential and
// number of ODE state variables, as requested by the caller's flags.
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
// Initialise the 17 state variables of one cell.  The textbook defaults
// are kept below for reference; the values actually used are a fitted
// steady state (same ordering as the commented defaults).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6775309540028,0.00126031074107193,0.782379594133090,0.782216749001106,0.000172068343086772,0.486227463562957,0.00291750746806204,0.999998383839518,1.89860165324306e-08,1.86371442934849e-05,0.999771183306077,1.00730952275387,0.999997729764813,4.01181567168462e-05,0.661435383223664,9.89216406636310,139.601234209998};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Advance num_steps time steps of size dt for every cell listed in
// cells_to_solve, or for cells 0..num_cells_to_solve-1 when that list is
// NULL.  Cells are independent, so the outer loop is parallelised;
// sv_id is private because each thread resolves its own cell index.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Take one time step of size dt for a single cell: snapshot the current
// state, let RHS_cpu produce the advanced state, then write it back.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);

    real state_now[NEQ];
    real state_next[NEQ];

    int idx;
    for (idx = 0; idx < NEQ; idx++)
        state_now[idx] = sv[idx];

    RHS_cpu(state_now, state_next, stim_current, dt);

    // Copy-back order is irrelevant; entries are independent.
    for (idx = NEQ - 1; idx >= 0; idx--)
        sv[idx] = state_next[idx];
}
// One update step of the ten Tusscher et al. 2004 epicardial ventricular
// myocyte model.  Despite the name, rDY_ does not hold derivatives:
// gate variables are advanced with the Rush-Larsen scheme
// (x_inf - (x_inf - x)*exp(-dt/tau)), the membrane voltage with an
// explicit Euler step, and the ionic concentrations are updated in place
// and copied out — so rDY_ holds the NEW state after a step of size dt.
//   sv           - current state (17 entries, unpacked below)
//   rDY_         - output: updated state
//   stim_current - external stimulus current added to the total current
//   dt           - time step
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Fitted parameter set for this (S1_2) cell variant; the values below
// override the textbook conductances declared above.
real parameters []={13.9645635317638,0.000234559273515713,0.000158508496150117,0.000387718953473422,0.271550011299244,0.171313643894679,0.148132634408518,3.52429749186627,0.0163232963007063,1.80625170161156,1099.99984094905,0.000508428591582056,0.426315288126368,0.0193610246251599,0.00342305438925442,2.79133840240607e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates may only relax (not reactivate) at depolarised voltages
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
} |
FullyDistVec.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _FULLY_DIST_VEC_H_
#define _FULLY_DIST_VEC_H_
#include <iostream>
#include <fstream>
#include <vector>
#include <utility>
#include <iterator>
#include <random>
#include "CombBLAS.h"
#include "CommGrid.h"
#include "FullyDist.h"
#include "Exception.h"
namespace combblas {
template <class IT, class NT>
class FullyDistSpVec;
template <class IT, class NT, class DER>
class SpParMat;
template <class IT>
class DistEdgeList;
template <class IU, class NU>
class DenseVectorLocalIterator;
// ABAB: As opposed to SpParMat, IT here is used to encode global size and global indices;
// therefore it can not be 32-bits, in general.
template <class IT, class NT>
class FullyDistVec: public FullyDist<IT,NT, typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type >
{
public:
FullyDistVec ( );
FullyDistVec ( IT globallen, NT initval);
FullyDistVec ( std::shared_ptr<CommGrid> grid);
FullyDistVec ( std::shared_ptr<CommGrid> grid, IT globallen, NT initval);
FullyDistVec ( const FullyDistSpVec<IT, NT> & rhs ); // Sparse -> Dense conversion constructor
FullyDistVec ( const std::vector<NT> & fillarr, std::shared_ptr<CommGrid> grid ); // initialize a FullyDistVec with a vector from each processor
template <class ITRHS, class NTRHS>
FullyDistVec ( const FullyDistVec<ITRHS, NTRHS>& rhs ); // type converter constructor
// Default handler for reading/writing plain scalar values; used by
// ReadDistribute / SaveGathered / ParallelWrite when no custom handler
// is supplied.
class ScalarReadSaveHandler
{
public:
// Value to use when the input provides indices but no numerical values.
NT getNoNum(IT index) { return static_cast<NT>(1); }
template <typename c, typename t>
NT read(std::basic_istream<c,t>& is, IT index)
{
NT v;
is >> v;
return v;
}
template <typename c, typename t>
void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
{
os << v;
}
};
// Write the vector to a file in parallel by converting to a sparse
// vector, which owns the parallel I/O implementation, and delegating.
template <class HANDLER>
void ParallelWrite(const std::string & filename, bool onebased, HANDLER handler, bool includeindices = true)
{
FullyDistSpVec<IT,NT> tmpSpVec = *this; // delegate
tmpSpVec.ParallelWrite(filename, onebased, handler, includeindices);
}
void ParallelWrite(const std::string & filename, bool onebased, bool includeindices = true) { ParallelWrite(filename, onebased, ScalarReadSaveHandler(), includeindices); };
// Read the vector from a file in parallel: delegate to the sparse
// vector's reader (BinOp combines duplicate indices), then convert the
// result back to dense.
template <typename _BinaryOperation>
void ParallelRead (const std::string & filename, bool onebased, _BinaryOperation BinOp)
{
FullyDistSpVec<IT,NT> tmpSpVec = *this; // delegate
tmpSpVec.ParallelRead(filename, onebased, BinOp);
*this = tmpSpVec; // sparse -> dense conversion
}
template <class HANDLER>
std::ifstream& ReadDistribute (std::ifstream& infile, int master, HANDLER handler);
std::ifstream& ReadDistribute (std::ifstream& infile, int master) { return ReadDistribute(infile, master, ScalarReadSaveHandler()); }
template <class HANDLER>
void SaveGathered(std::ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
void SaveGathered(std::ofstream& outfile, int master) { SaveGathered(outfile, master, ScalarReadSaveHandler(), false); }
template <class ITRHS, class NTRHS>
FullyDistVec<IT,NT> & operator=(const FullyDistVec< ITRHS,NTRHS > & rhs); // assignment with type conversion
FullyDistVec<IT,NT> & operator=(const FullyDistVec<IT,NT> & rhs); //!< Actual assignment operator
FullyDistVec<IT,NT> & operator=(const FullyDistSpVec<IT,NT> & rhs); //!< FullyDistSpVec->FullyDistVec conversion operator
// Assign the same fixed value to every locally-held entry of the vector.
FullyDistVec<IT,NT> & operator=(NT fixedval) // assign fixed value
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; i < arr.size(); ++i)
arr[i] = fixedval;
return *this;
}
FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const; //<! subsref
FullyDistVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator+=(const FullyDistVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator-=(const FullyDistVec<IT,NT> & rhs);
bool operator==(const FullyDistVec<IT,NT> & rhs) const;
void SetElement (IT indx, NT numx); // element-wise assignment
void SetLocalElement(IT index, NT value) { arr[index] = value; }; // no checks, local index
NT GetElement (IT indx) const; // element-wise fetch
NT operator[](IT indx) const // more c++ like API
{
return GetElement(indx);
}
void Set(const FullyDistSpVec< IT,NT > & rhs);
template <class NT1, typename _BinaryOperationIdx, typename _BinaryOperationVal>
void GSet (const FullyDistSpVec<IT,NT1> & spVec, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal, MPI_Win win);
template <class NT1, typename _BinaryOperationIdx>
FullyDistSpVec<IT,NT> GGet (const FullyDistSpVec<IT,NT1> & spVec, _BinaryOperationIdx __binopIdx, NT nullValue);
void iota(IT globalsize, NT first);
void RandPerm(); // randomly permute the vector
FullyDistVec<IT,IT> sort(); // sort and return the permutation
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::LengthUntil;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::TotalLength;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::Owner;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyLocLength;
IT LocArrSize() const { return arr.size(); } // = MyLocLength() once arr is resized
//TODO: we should change this function and return the vector directly
const NT * GetLocArr() const { return arr.data(); } // = MyLocLength() once arr is resized
template <typename _Predicate>
FullyDistSpVec<IT,NT> Find(_Predicate pred) const; //!< Return the elements for which pred is true
FullyDistSpVec<IT,NT> Find(NT val) const; //!< Return the elements val is found
template <typename _Predicate>
FullyDistVec<IT,IT> FindInds(_Predicate pred) const; //!< Return the indices where pred is true
template <typename _Predicate>
IT Count(_Predicate pred) const; //!< Return the number of elements for which pred is true
template <typename _UnaryOperation>
void Apply(_UnaryOperation __unary_op)
{
    // Replace each local entry with the image of __unary_op applied to it
    // (in-place equivalent of std::transform over arr).
    for (auto & entry : arr)
        entry = __unary_op(entry);
}
template <typename _BinaryOperation>
void ApplyInd(_BinaryOperation __binary_op)
{
IT offset = LengthUntil();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(size_t i=0; i < arr.size(); ++i)
arr[i] = __binary_op(arr[i], i + offset);
}
template <typename _UnaryOperation, typename IRRELEVANT_NT>
void Apply(_UnaryOperation __unary_op, const FullyDistSpVec<IT,IRRELEVANT_NT>& mask);
// extended callback versions
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, const bool useExtendedBinOp);
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue, const bool useExtendedBinOp);
// plain fallback versions
// Plain (non-extended) dense overload: wraps the scalar functors in
// EWiseExtToPlainAdapter and forwards to the extended-callback version;
// the trailing "true" is the useExtendedBinOp flag telling the callee the
// adapters are already in place.
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op)
{
    EWiseApply(other,
               EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
               EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
               true);
}
// Plain (non-extended) sparse overload: same adapter wrapping as the dense
// case, additionally forwarding the null-handling policy (applyNulls and the
// nullValue used for positions absent from the sparse operand).
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue)
{
    EWiseApply(other,
               EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
               EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
               applyNulls, nullValue, true);
}
// Predicate that accepts every element pair; used as the default _do_op for
// the convenience EWiseApply overloads.  The call operator is const-qualified
// (it mutates nothing, so it must be invocable through const functor objects)
// and its parameters are unnamed because they are intentionally ignored,
// which also silences unused-parameter warnings.
template <typename T1, typename T2>
class retTrue {
public:
    bool operator()(const T1 &, const T2 &) const
    {
        return true;
    }
};
// Convenience dense overload without a predicate: applies __binary_op to
// every element pair by supplying the always-true retTrue predicate.
template <typename _BinaryOperation, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op)
{
    this->EWiseApply(other, __binary_op, retTrue<NT, NT2>());
}
// Convenience sparse overload without a predicate: always-true retTrue
// predicate, with the caller-supplied null-handling policy forwarded.
template <typename _BinaryOperation, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, bool applyNulls, NT2 nullValue)
{
    this->EWiseApply(other, __binary_op, retTrue<NT, NT2>(), applyNulls, nullValue);
}
// Dump this processor's local portion of the vector, space-separated, to a
// per-process debug file opened through the communication grid.  prefix is
// taken by const reference to avoid copying the string on every call
// (backward-compatible for all existing callers).
void PrintToFile(const std::string & prefix)
{
    std::ofstream output;
    commGrid->OpenDebugFile(prefix, output);
    std::copy(arr.begin(), arr.end(), std::ostream_iterator<NT> (output, " "));
    output << std::endl;
    output.close();
}
void PrintInfo(std::string vectorname) const;
void DebugPrint();
std::shared_ptr<CommGrid> getcommgrid() const { return commGrid; }
std::pair<IT, NT> MinElement() const; // returns <index, value> pair of global minimum
template <typename _BinaryOperation>
NT Reduce(_BinaryOperation __binary_op, NT identity) const; //! Reduce can be used to implement max_element, for instance
template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op) const;
void SelectCandidates(double nver);
template <typename _BinaryOperation, typename OUT = typename std::result_of<_BinaryOperation&(NT,NT)>::type>
void EWiseOut(const FullyDistVec<IT,NT> & rhs, _BinaryOperation __binary_op, FullyDistVec<IT,OUT> & result);
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::glen;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::commGrid;
//FUAD
void GetElements (std::vector<IT>& indx_vec, std::vector<NT>& out_vec) const;
private:
std::vector< NT > arr;
template <typename _BinaryOperation>
void EWise(const FullyDistVec<IT,NT> & rhs, _BinaryOperation __binary_op);
template <class IU, class NU>
friend class DenseParMat;
template <class IU, class NU, class UDER>
friend class SpParMat;
template <class IU, class NU>
friend class FullyDistVec;
template <class IU, class NU>
friend class FullyDistSpVec;
template <class IU, class NU>
friend class DenseVectorLocalIterator;
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
friend FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote>
SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x );
template <typename IU, typename NU1, typename NU2>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);
template <typename IU, typename NU1, typename NU2, typename _BinaryOperation>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, typename promote_trait<NU1,NU2>::T_promote zero);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply_threaded (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename IU>
friend void RenameVertices(DistEdgeList<IU> & DEL);
template <typename IU, typename NU>
friend FullyDistVec<IU,NU> Concatenate ( std::vector< FullyDistVec<IU,NU> > & vecs);
template <typename IU, typename NU>
friend void Augment (FullyDistVec<int64_t, int64_t>& mateRow2Col, FullyDistVec<int64_t, int64_t>& mateCol2Row,
FullyDistVec<int64_t, int64_t>& parentsRow, FullyDistVec<int64_t, int64_t>& leaves);
template <class IU, class DER>
friend SpParMat<IU, bool, DER> PermMat (const FullyDistVec<IU,IU> & ri, const IU ncol);
friend void maximumMatching(SpParMat < int64_t, bool, SpDCCols<int64_t,bool> > & A, FullyDistVec<int64_t, int64_t>& mateRow2Col,FullyDistVec<int64_t, int64_t>& mateCol2Row);
};
}
#include "../src/FullyDistVec.cpp"
#endif
|
mafillvcompmain.c | /* CalculiX - A 3-dimensional finite element program */
/* Copyright (C) 1998-2015 Guido Dhondt */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU General Public License as */
/* published by the Free Software Foundation(version 2); */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <pthread.h>
#include "CalculiX.h"
static char *lakonf1;
static ITG num_cpus,*nef1,*ipnei1,*neifa1,*neiel1,*jq1,*irow1,*nzs1,*ielfa1,*
ifabou1,*nbody1,*neq1,*nactdohinv1,*icyclic1,*ifatie1;
static double *auv1=NULL,*adv1=NULL,*bv1=NULL,*vfa1,*xxn1,*area1,*vel1,
*cosa1,*umfa1,*xlet1,*xle1,*gradvfa1,*xxi1,*body1,*volume1,*dtimef1,
*velo1,*veloo1,*sel1,*xrlfa1,*gamma1,*xxj1,*a11,*a21,*a31,*flux1,
*c1;
/* parallel assembly driver for the velocity equations of the compressible
   CFD solver: determines the thread count, hands the arguments to the
   file-static pointers read by the worker mafillvcompmt, spawns num_cpus
   POSIX threads that each fill a private copy of the lhs (adv/auv) and
   rhs (bv), and finally sums the per-thread copies into the caller's
   arrays with OpenMP */
void mafillvcompmain(ITG *nef,ITG *ipnei,ITG *neifa,ITG *neiel,
    double *vfa,double *xxn,double *area,double *auv,double *adv,
    ITG *jq,ITG *irow,ITG *nzs,double *bv,double *vel,double *cosa,
    double *umfa,double *xlet,double *xle,double *gradvfa,
    double *xxi,double *body,double *volume,
    ITG *ielfa,char *lakonf,ITG *ifabou,ITG *nbody,ITG *neq,
    double *dtimef,double *velo,double *veloo,
    double *sel,double *xrlfa,double *gamma,double *xxj,
    ITG *nactdohinv,double *a1,double *a2,double *a3,double *flux,
    ITG *icyclic,double *c,ITG *ifatie){

    ITG i,j;

    /* variables for multithreading procedure */

    ITG sys_cpus,*ithread=NULL;
    char *env,*envloc,*envsys;

    // printf("entered mafillvcompmain \n");

    num_cpus = 0;
    sys_cpus=0;

    /* explicit user declaration prevails:
       NUMBER_OF_CPUS, if set and positive, fixes the machine CPU count */

    envsys=getenv("NUMBER_OF_CPUS");
    if(envsys){
	sys_cpus=atoi(envsys);
	if(sys_cpus<0) sys_cpus=0;
    }

    /* automatic detection of available number of processors */

    if(sys_cpus==0){
	sys_cpus = getSystemCPUs();
	if(sys_cpus<1) sys_cpus=1;
    }

    /* local declaration prevails, if strictly positive:
       CCX_NPROC_CFD overrides the thread count for the CFD part only,
       capped at the machine CPU count */

    envloc = getenv("CCX_NPROC_CFD");
    if(envloc){
	num_cpus=atoi(envloc);
	if(num_cpus<0){
	    num_cpus=0;
	}else if(num_cpus>sys_cpus){
	    num_cpus=sys_cpus;
	}
    }

    /* else global declaration, if any, applies:
       fall back to OMP_NUM_THREADS, again capped at sys_cpus */

    env = getenv("OMP_NUM_THREADS");
    if(num_cpus==0){
	if (env)
	    num_cpus = atoi(env);
	if (num_cpus < 1) {
	    num_cpus=1;
	}else if(num_cpus>sys_cpus){
	    num_cpus=sys_cpus;
	}
    }

    // next line is to be inserted in a similar way for all other paralell parts
    /* never use more threads than there are elements */
    if(*nef<num_cpus) num_cpus=*nef;

    pthread_t tid[num_cpus];

    /* allocating fields for lhs and rhs matrix: one private copy per
       thread — *neq diagonal entries (adv1), 2*(*nzs) off-diagonal
       entries (auv1, cast to long long to avoid 32-bit overflow of the
       product) and 3*(*neq) rhs entries (bv1: three velocity components) */

    NNEW(adv1,double,num_cpus**neq);
    NNEW(auv1,double,(long long)num_cpus*2**nzs);
    NNEW(bv1,double,num_cpus*3**neq);

    /* calculating the stiffness and/or mass matrix
       (symmetric part) */

    /* publish the arguments through file-static pointers so that the
       thread entry point mafillvcompmt (which only receives a thread id)
       can reach them */

    nef1=nef;ipnei1=ipnei;neifa1=neifa;neiel1=neiel;vfa1=vfa;xxn1=xxn;
    area1=area;jq1=jq;irow1=irow;nzs1=nzs;vel1=vel;cosa1=cosa;umfa1=umfa;
    xlet1=xlet;xle1=xle;gradvfa1=gradvfa;xxi1=xxi;body1=body;volume1=volume;
    ielfa1=ielfa;lakonf1=lakonf;ifabou1=ifabou;nbody1=nbody;neq1=neq;
    dtimef1=dtimef;velo1=velo;veloo1=veloo;sel1=sel;xrlfa1=xrlfa;
    gamma1=gamma;xxj1=xxj;nactdohinv1=nactdohinv;a11=a1;a21=a2;a31=a3;
    flux1=flux;icyclic1=icyclic;c1=c;ifatie1=ifatie;

    /* create threads and wait */

    NNEW(ithread,ITG,num_cpus);
    for(i=0; i<num_cpus; i++)  {
	ithread[i]=i;
	pthread_create(&tid[i], NULL, (void *)mafillvcompmt, (void *)&ithread[i]);
    }
    for(i=0; i<num_cpus; i++)  pthread_join(tid[i], NULL);
    SFREE(ithread);

    /* copying and accumulating the stiffnes and/or mass matrix:
       reduce the num_cpus private copies into the caller's arrays;
       thread j's copy of entry i lives at offset i + j*fieldsize */

#pragma omp parallel \
    default(none) \
    shared(neq,adv,adv1,num_cpus,nzs,auv,auv1,bv,bv1) \
    private(i,j)
    {
#pragma omp for
	for(i=0;i<*neq;i++){
	    adv[i]=adv1[i];
	    for(j=1;j<num_cpus;j++){
		adv[i]+=adv1[i+j**neq];
	    }
	}
#pragma omp for
	for(i=0;i<2**nzs;i++){
	    auv[i]=auv1[i];
	    for(j=1;j<num_cpus;j++){
		auv[i]+=auv1[i+(long long)j*2**nzs];
	    }
	}
#pragma omp for
	for(i=0;i<3**neq;i++){
	    bv[i]=bv1[i];
	    for(j=1;j<num_cpus;j++){
		bv[i]+=bv1[i+j*3**neq];
	    }
	}
    }

    SFREE(adv1);
    SFREE(auv1);
    SFREE(bv1);

    return;

}
/* subroutine for multithreading of mafillvcomp */
/* thread entry point: *i is this thread's id (0..num_cpus-1); computes
   the offsets into the per-thread copies of the matrices and the element
   range [nefa,nefb] this thread assembles, then calls the Fortran
   assembly routine mafillvcomp */
void *mafillvcompmt(ITG *i){

    ITG indexadv,indexbv,nefa,nefb,nefdelta;
    long long indexauv;

    /* start of this thread's private slice in adv1, auv1 and bv1
       (auv offset in long long: 2*(*nzs1) entries per thread) */
    indexadv=*i**neq1;
    indexauv=(long long)*i*2**nzs1;
    indexbv=*i*3**neq1;

    // ceil -> floor
    /* elements per thread (1-based element numbering) */
    nefdelta=(ITG)floor(*nef1/(double)num_cpus);
    nefa=*i*nefdelta+1;
    nefb=(*i+1)*nefdelta;
    // next line! -> all parallel sections
    /* the last thread picks up the remainder left by the floor division */
    if((*i==num_cpus-1)&&(nefb<*nef1)) nefb=*nef1;

    FORTRAN(mafillvcomp,(nef1,ipnei1,neifa1,neiel1,vfa1,xxn1,area1,
			 &auv1[indexauv],&adv1[indexadv],jq1,irow1,nzs1,&bv1[indexbv],
			 vel1,cosa1,umfa1,xlet1,xle1,gradvfa1,xxi1,
			 body1,volume1,ielfa1,lakonf1,ifabou1,nbody1,neq1,
			 dtimef1,velo1,veloo1,sel1,xrlfa1,gamma1,xxj1,nactdohinv1,a11,
			 a21,a31,flux1,&nefa,&nefb,icyclic1,c1,ifatie1));

    return NULL;
}
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
# if defined(MAGICKCORE_WINDOWS_SUPPORT)
# if !defined(__MINGW32__)
# include <win32config.h>
# endif
# endif
# include <libxml/parser.h>
# include <libxml/tree.h>
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
struct _ProfileInfo
{
char
*name;
size_t
length;
unsigned char
*info;
size_t
signature;
};
typedef struct _CMSExceptionInfo
{
Image
*image;
ExceptionInfo
*exception;
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
/* Copy every profile attached to clone_image onto image, replacing any
   profiles image already carries.  A clone_image without profiles leaves
   image untouched. */
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles == (void *) NULL)
    return(MagickTrue);
  if (image->profiles != (void *) NULL)
    DestroyImageProfiles(image);
  image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
    (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* a NULL profile argument removes the matching resource block from the
     image's 8BIM profile as well, keeping both representations in sync */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/* Release the splay tree holding all of the image's profiles, if any,
   and reset the profiles pointer to NULL. */
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  /* advances the splay tree's internal iterator; NULL marks the end —
     callers reset the iteration elsewhere before the first call */
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
typedef struct _LCMSInfo
{
ColorspaceType
colorspace;
cmsUInt32Number
type;
size_t
channels;
cmsHPROFILE
profile;
int
intent;
double
scale,
translate;
void
**magick_restrict pixels;
} LCMSInfo;
/* Backport shims for Little CMS releases older than 2.6, which lack the
   context API: the caller's user-data pointer is smuggled through the
   cmsContext handle itself, the THR error-handler variant falls back to the
   global handler, and context destruction is a no-op.  With lcms >= 2.6 the
   real library functions are used instead. */
#if LCMS_VERSION < 2060
static void* cmsGetContextUserData(cmsContext ContextID)
{
  /* pre-2.6: the "context" is the user data (see cmsCreateContext below) */
  return(ContextID);
}

static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
  magick_unreferenced(Plugin);
  return((cmsContext) UserData);
}

static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
  cmsLogErrorHandlerFunction Fn)
{
  magick_unreferenced(ContextID);
  cmsSetLogErrorHandler(Fn);
}

static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
  magick_unreferenced(ContextID);
}
#endif
/* Free each per-thread pixel buffer and then the pointer array itself;
   tolerates a NULL set and partially populated slots, returns NULL. */
static void **DestroyPixelThreadSet(void **pixels)
{
  ssize_t
    i,
    number_threads;

  if (pixels == (void **) NULL)
    return((void **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
  {
    if (pixels[i] != (void *) NULL)
      pixels[i]=RelinquishMagickMemory(pixels[i]);
  }
  return((void **) RelinquishMagickMemory(pixels));
}
/* Allocate one scanline-sized pixel buffer per worker thread; each buffer
   holds columns*channels samples of double (highres) or Quantum width.
   Returns NULL on allocation failure, releasing anything acquired so far. */
static void **AcquirePixelThreadSet(const size_t columns,
  const size_t channels,MagickBooleanType highres)
{
  size_t
    number_threads,
    sample_size;

  ssize_t
    i;

  void
    **pixels;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
  if (pixels == (void **) NULL)
    return((void **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  sample_size=(highres != MagickFalse) ? sizeof(double) : sizeof(Quantum);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=AcquireQuantumMemory(columns,channels*sample_size);
    if (pixels[i] == (void *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
/* Delete every per-thread color transform and release the array; the set
   may be partially populated (NULL slots are skipped).  Returns NULL. */
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  ssize_t
    i,
    number_threads;

  assert(transform != (cmsHTRANSFORM *) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
  {
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  }
  return((cmsHTRANSFORM *) RelinquishMagickMemory(transform));
}
/* Build one lcms color transform per worker thread (transforms are not
   thread-safe, so each thread needs its own).  Returns NULL on failure,
   destroying any transforms created so far. */
static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info,
  const LCMSInfo *target_info,const cmsUInt32Number flags,
  cmsContext cms_context)
{
  cmsHTRANSFORM
    *transform;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /* note: cmsCreateTransformTHR takes a single rendering intent — the
       target's; source_info->intent is not consulted here */
    transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile,
      source_info->type,target_info->profile,target_info->type,
      target_info->intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
/* lcms error callback: recover the CMSExceptionInfo stashed as the
   context's user data and surface the lcms diagnostic as a Magick
   ImageWarning exception (transform failures are reported, not fatal). */
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context);
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      /* no image attached to the context: still record a warning */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ? message : "no message");
  (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
    "UnableToTransformColorspace","`%s', %s (#%u)",image->filename,
    message != (char *) NULL ? message : "no message",severity);
}
/* Color-transform one row of pixels in high-resolution (double) mode:
   pack the row's source channels into this thread's double buffer,
   run the lcms transform, then unpack the target buffer back into the
   pixel row q in place. */
static void TransformDoublePixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
/* Quantum -> normalized lcms sample, and back (with clamping). */
#define GetLCMSPixel(source_info,pixel) \
  (source_info->scale*QuantumScale*(pixel)+source_info->translate)
#define SetLCMSPixel(target_info,pixel) \
  ClampToQuantum(target_info->scale*QuantumRange*(pixel)+target_info->translate)

  double
    *p;

  ssize_t
    x;

  /* pack: 1 channel = gray/red only, 3 = RGB, 4 = CMYK (extra black) */
  p=(double *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetLCMSPixel(source_info,GetPixelRed(image,q));
    if (source_info->channels > 1)
      {
        *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q));
        *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q));
      }
    if (source_info->channels > 3)
      *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q));
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],
    target_info->pixels[id],(unsigned int) image->columns);
  /* unpack: rewind q to the start of the row, then write target channels */
  p=(double *) target_info->pixels[id];
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,SetLCMSPixel(target_info,*p),q);
    else
      SetPixelRed(image,SetLCMSPixel(target_info,*p),q);
    p++;
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,SetLCMSPixel(target_info,*p),q);
        p++;
        SetPixelBlue(image,SetLCMSPixel(target_info,*p),q);
        p++;
      }
    if (target_info->channels > 3)
      {
        SetPixelBlack(image,SetLCMSPixel(target_info,*p),q);
        p++;
      }
    q+=GetPixelChannels(image);
  }
}
/* Color-transform one row of pixels in native Quantum precision (no
   scale/translate conversion): pack source channels into the thread's
   Quantum buffer, run the lcms transform, unpack back into q in place. */
static void TransformQuantumPixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
  Quantum
    *p;

  ssize_t
    x;

  /* pack: 1 channel = gray/red only, 3 = RGB, 4 = CMYK (extra black) */
  p=(Quantum *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetPixelRed(image,q);
    if (source_info->channels > 1)
      {
        *p++=GetPixelGreen(image,q);
        *p++=GetPixelBlue(image,q);
      }
    if (source_info->channels > 3)
      *p++=GetPixelBlack(image,q);
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],
    target_info->pixels[id],(unsigned int) image->columns);
  /* unpack: rewind q to the start of the row, then write target channels */
  p=(Quantum *) target_info->pixels[id];
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,*p++,q);
    else
      SetPixelRed(image,*p++,q);
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,*p++,q);
        SetPixelBlue(image,*p++,q);
      }
    if (target_info->channels > 3)
      SetPixelBlack(image,*p++,q);
    q+=GetPixelChannels(image);
  }
}
#endif
static MagickBooleanType SetsRGBImageProfile(Image *image,
  ExceptionInfo *exception)
{
  /*
    Embedded IEC 61966-2.1 sRGB ICC profile, attached verbatim as the
    image's "icc" profile.  Per the copyright text inside the blob it was
    created by Graeme W. Gill and released into the public domain.  Do not
    edit these bytes: the blob is binary ICC data (header, tag table, TRC
    curves, and XYZ colorant tags) and any change corrupts the profile.
  */
  static unsigned char
    sRGBProfile[] =
    {
      0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
      0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
      0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
      0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
      0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
      0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
      0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
      0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
      0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
      0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
      0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
      0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
      0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
      0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
      0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
      0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
      0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
      0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
      0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
      0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
      0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
      0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
      0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
      0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
      0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
      0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
      0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
      0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
      0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
      0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
      0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
      0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
      0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
      0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
      0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
      0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
      0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
      0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
      0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
      0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
      0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
      0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
      0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
      0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
      0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
      0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
      0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
      0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
      0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
      0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
      0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
      0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
      0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
      0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
      0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
      0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
      0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
      0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
      0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
      0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
      0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
      0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
      0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
      0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
      0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
      0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
      0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
      0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
      0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
      0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
      0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
      0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
      0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
      0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
      0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
      0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
      0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
      0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
      0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
      0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
      0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
      0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
      0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
      0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
      0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
      0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
      0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
      0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
      0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
      0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
      0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
      0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
      0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
      0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
      0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
      0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
      0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
      0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
      0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
      0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
      0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
      0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
      0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
      0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
      0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
      0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
      0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
      0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
      0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
      0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
      0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
      0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
      0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
      0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
      0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
      0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
      0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
      0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
      0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
      0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
      0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
      0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
      0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
      0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
      0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
      0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
      0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
      0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
      0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
      0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
      0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
      0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
      0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
      0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
      0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
      0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
      0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
      0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
      0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
      0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
      0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
      0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
      0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
      0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
      0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
      0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
      0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
      0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
      0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
      0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
      0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
      0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
      0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
      0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
      0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
      0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
      0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
      0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
      0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
      0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
      0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
      0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
      0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
      0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
      0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
      0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
      0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
      0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
      0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
      0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
      0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
      0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
      0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
      0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
      0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
      0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
      0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
      0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
      0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
      0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
      0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
      0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
      0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
      0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
      0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
      0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
      0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
      0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
      0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
      0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
      0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
      0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
      0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
      0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
      0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
      0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
      0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
      0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
      0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
      0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
      0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
      0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
      0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
      0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
      0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
      0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
      0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
      0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
      0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
      0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
      0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
      0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
      0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
      0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
      0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
      0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
      0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
      0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
      0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
      0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
      0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
      0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
      0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
      0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
    };

  StringInfo
    *profile;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Never replace an ICC profile the image already carries.
  */
  if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
    return(MagickFalse);
  /*
    SetStringInfoDatum copies the bytes, so the StringInfo can be freed
    after SetImageProfile has cloned it into the image's profile tree.
  */
  profile=AcquireStringInfo(sizeof(sRGBProfile));
  SetStringInfoDatum(profile,sRGBProfile);
  status=SetImageProfile(image,"icc",profile,exception);
  profile=DestroyStringInfo(profile);
  return(status);
}
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag "Profile/Image"
#ifndef TYPE_XYZ_8
#define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1))
#endif
#define ThrowProfileException(severity,tag,context) \
{ \
  if (profile != (StringInfo *) NULL) \
    profile=DestroyStringInfo(profile); \
  if (cms_context != (cmsContext) NULL) \
    cmsDeleteContext(cms_context); \
  if (source_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_info.profile); \
  if (target_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_info.profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s): a NULL/empty datum means "remove every
        profile whose name matches"; restart iteration after each delete
        since removal invalidates the profile iterator.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.  Non-ICC profiles are
    attached verbatim; ICC/ICM profiles trigger a color transform below.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            The new profile is identical to the embedded one; if EXIF says
            the image is sRGB, normalize to the canonical sRGB profile.
          */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsContext
          cms_context;

        CMSExceptionInfo
          cms_exception;

        LCMSInfo
          source_info,
          target_info;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cms_exception.image=image;
        cms_exception.exception=exception;
        cms_context=cmsCreateContext(NULL,&cms_exception);
        if (cms_context == (cmsContext) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler);
        source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_info.profile == (cmsHPROFILE) NULL)
          {
            cmsDeleteContext(cms_context);
            ThrowBinaryException(ResourceLimitError,
              "ColorspaceColorProfileMismatch",name);
          }
        if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile,exception);
        else
          {
            CacheView
              *image_view;

            cmsColorSpaceSignature
              signature;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags;

#if !defined(MAGICKCORE_HDRI_SUPPORT)
            const char
              *artifact;
#endif

            MagickBooleanType
              highres;

            MagickOffsetType
              progress;

            ssize_t
              y;

            /*
              When an ICC profile is already embedded it becomes the
              transform source and the new profile becomes the target.
            */
            target_info.profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                target_info.profile=source_info.profile;
                source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
                  GetStringInfoDatum(icc_profile),
                  (cmsUInt32Number) GetStringInfoLength(icc_profile));
                if (source_info.profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            highres=MagickTrue;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
            artifact=GetImageArtifact(image,"profile:highres-transform");
            if (IsStringFalse(artifact) != MagickFalse)
              highres=MagickFalse;
#endif
            /*
              Map the source profile's color space onto an LCMS pixel type,
              channel count, and sample scale.  Non-highres builds use the
              native quantum width; otherwise doubles are used.
            */
            source_info.scale=1.0;
            source_info.translate=0.0;
            source_info.colorspace=sRGBColorspace;
            source_info.channels=3;
            switch (cmsGetColorSpace(source_info.profile))
            {
              case cmsSigCmykData:
              {
                source_info.colorspace=CMYKColorspace;
                source_info.channels=4;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_CMYK_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_CMYK_16;
                else
#endif
                  {
                    source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                    source_info.scale=100.0;
                  }
                break;
              }
              case cmsSigGrayData:
              {
                source_info.colorspace=GRAYColorspace;
                source_info.channels=1;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_GRAY_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_GRAY_16;
                else
#endif
                  source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                source_info.colorspace=LabColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_Lab_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_Lab_16;
                else
#endif
                  {
                    source_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                    source_info.scale=100.0;
                    source_info.translate=(-0.5);
                  }
                break;
              }
              case cmsSigRgbData:
              {
                source_info.colorspace=sRGBColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_RGB_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_RGB_16;
                else
#endif
                  source_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                source_info.colorspace=XYZColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_XYZ_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_XYZ_16;
                else
#endif
                  source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /*
              The target color space is the source's PCS for a one-profile
              transform, or the target profile's own color space otherwise.
            */
            signature=cmsGetPCS(source_info.profile);
            if (target_info.profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_info.profile);
            target_info.scale=1.0;
            target_info.translate=0.0;
            target_info.channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_info.colorspace=CMYKColorspace;
                target_info.channels=4;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_CMYK_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_CMYK_16;
                else
#endif
                  {
                    target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                    target_info.scale=0.01;
                  }
                break;
              }
              case cmsSigGrayData:
              {
                target_info.colorspace=GRAYColorspace;
                target_info.channels=1;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_GRAY_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_GRAY_16;
                else
#endif
                  target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                target_info.colorspace=LabColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_Lab_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_Lab_16;
                else
#endif
                  {
                    target_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                    target_info.scale=0.01;
                    target_info.translate=0.5;
                  }
                break;
              }
              case cmsSigRgbData:
              {
                target_info.colorspace=sRGBColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_RGB_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_RGB_16;
                else
#endif
                  target_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                target_info.colorspace=XYZColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_XYZ_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_XYZ_16;
                else
#endif
                  target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            switch (image->rendering_intent)
            {
              case AbsoluteIntent:
              {
                target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC;
                break;
              }
              case PerceptualIntent:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
              case RelativeIntent:
              {
                target_info.intent=INTENT_RELATIVE_COLORIMETRIC;
                break;
              }
              case SaturationIntent:
              {
                target_info.intent=INTENT_SATURATION;
                break;
              }
              default:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(&source_info,&target_info,
              flags,cms_context);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image
              profiles, using one staging buffer and transform per thread.
            */
            source_info.pixels=AcquirePixelThreadSet(image->columns,
              source_info.channels,highres);
            target_info.pixels=AcquirePixelThreadSet(image->columns,
              target_info.channels,highres);
            if ((source_info.pixels == (void **) NULL) ||
                (target_info.pixels == (void **) NULL))
              {
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
              {
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_info.profile);
                if (target_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_info.profile);
                return(MagickFalse);
              }
            if (target_info.colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_info.colorspace,exception);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              Quantum
                *magick_restrict q;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              if (highres != MagickFalse)
                TransformDoublePixels(id,image,&source_info,&target_info,
                  transform,q);
              else
                TransformQuantumPixels(id,image,&source_info,&target_info,
                  transform,q);
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp atomic
#endif
                  progress++;
                  proceed=SetImageProgress(image,ProfileImageTag,progress,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_info.colorspace,exception);
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  TrueColorType : TrueColorAlphaType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  ColorSeparationType : ColorSeparationAlphaType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  GrayscaleType : GrayscaleAlphaType;
                break;
              }
              default:
                break;
            }
            target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
            source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
            transform=DestroyTransformThreadSet(transform);
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile,exception);
            if (target_info.profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_info.profile);
          }
        (void) cmsCloseProfile(source_info.profile);
        cmsDeleteContext(cms_context);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  /*
    Detach the named profile from the image's profile tree and hand it
    back to the caller (NULL when the image holds no profiles).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /*
    Mirror the removal into the embedded 8BIM wrapper first.
  */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  /*
    Rewind the profile iterator so GetNextImageProfile() starts over
    from the first profile.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  /*
    Copy one byte from the resource stream into *quantum and return the
    advanced cursor.
  */
  quantum[0]=p[0];
  return(p+1);
}
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  /*
    Decode a 32-bit big-endian value from the resource stream; returns
    the cursor advanced past the four consumed bytes.
  */
  unsigned int
    value;

  value=((unsigned int) p[0] << 24) |
        ((unsigned int) p[1] << 16) |
        ((unsigned int) p[2] << 8) |
        ((unsigned int) p[3]);
  *quantum=value;
  return(p+4);
}
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  /*
    Decode a 16-bit big-endian value from the resource stream; returns
    the cursor advanced past the two consumed bytes.
  */
  *quantum=(unsigned short) (((unsigned short) p[0] << 8) |
    (unsigned short) p[1]);
  return(p+2);
}
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  /*
    Encode a 32-bit value big-endian at p (exactly four bytes written).
  */
  p[0]=(unsigned char) (quantum >> 24);
  p[1]=(unsigned char) (quantum >> 16);
  p[2]=(unsigned char) (quantum >> 8);
  p[3]=(unsigned char) quantum;
}
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  /*
    Keep the embedded "8bim" resource block consistent with a named
    profile: find the Photoshop resource whose id matches the profile
    name and either splice in the new payload, or cut the resource out
    entirely when profile is NULL (removal).  Only icc/iptc/xmp
    profiles have 8BIM counterparts; anything else is a no-op.
  */
  const unsigned char
    *datum,
    *q;

  const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /*
    Map the profile name to its Photoshop image-resource id.
  */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* remember where this resource record starts */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /*
      Skip the Pascal-style resource name; the length byte plus name is
      padded to an even total.
    */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;  /* resource data is padded to an even byte count */
    if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;  /* not the resource we want: skip its payload */
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        extract_extent=0;
        extent=(datum+length)-(p+count);  /* bytes after this resource */
        if (profile == (StringInfo *) NULL)
          {
            /*
              Removal: copy everything up to the record start...
            */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /*
              Replacement: keep the header, rewrite the 4-byte stored
              length just before the payload, then copy the new payload
              (padded to an even length).
            */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        /*
          ...and append whatever followed the old resource, then install
          the rebuilt blob as the image's "8bim" profile.
        */
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  /*
    Walk a Photoshop 8BIM resource block and register every embedded
    profile (iptc, icc, exif, xmp) with the image; also picks up the
    resolution resource (0x03ed).  Unknown resource ids are skipped.
  */
  const unsigned char
    *datum;

  const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /*
      Skip the Pascal-style resource name (padded to an even length).
    */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    /*
      Bounds check before touching the payload.
    */
    if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;  /* 16.16 fixed */
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        /* recursive=MagickTrue prevents re-writing back into 8BIM */
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /*
      Resource data is padded to an even byte count.
    */
    if ((count & 0x01) != 0)
      p++;
  }
}
static void PatchCorruptProfile(const char *name,StringInfo *profile)
{
  unsigned char
    *p;

  size_t
    length;

  /*
    Detect corrupt profiles and if discovered, repair.
  */
  if (LocaleCompare(name,"xmp") == 0)
    {
      /*
        Remove garbage after xpacket end.
      */
      p=GetStringInfoDatum(profile);
      p=(unsigned char *) strstr((const char *) p,"<?xpacket end=\"w\"?>");
      if (p != (unsigned char *) NULL)
        {
          /* 19 == strlen of the xpacket-end marker just matched */
          p+=19;
          length=p-GetStringInfoDatum(profile);
          if (length != GetStringInfoLength(profile))
            {
              /* truncate in place: terminate and shrink the StringInfo */
              *p='\0';
              SetStringInfoLength(profile,length);
            }
        }
      return;
    }
  if (LocaleCompare(name,"exif") == 0)
    {
      /*
        Check if profile starts with byte order marker instead of Exif.
      */
      p=GetStringInfoDatum(profile);
      if ((LocaleNCompare((const char *) p,"MM",2) == 0) ||
          (LocaleNCompare((const char *) p,"II",2) == 0))
        {
          /* prepend the missing "Exif\0\0" signature */
          const unsigned char
            profile_start[] = "Exif\0\0";

          StringInfo
            *exif_profile;

          exif_profile=AcquireStringInfo(6);
          if (exif_profile != (StringInfo *) NULL)
            {
              SetStringInfoDatum(exif_profile,profile_start);
              ConcatenateStringInfo(exif_profile,profile);
              SetStringInfoLength(profile,GetStringInfoLength(exif_profile));
              SetStringInfo(profile,exif_profile);
              exif_profile=DestroyStringInfo(exif_profile);
            }
        }
    }
}
#if defined(MAGICKCORE_XML_DELEGATE)
static MagickBooleanType ValidateXMPProfile(Image *image,
  const StringInfo *profile,ExceptionInfo *exception)
{
  /*
    Validate an XMP profile by parsing it with libxml2; returns
    MagickFalse (with a warning) when the XML is malformed.
  */
  xmlDocPtr
    document;

  /*
    Parse XML profile.
  */
  document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
    GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
    XML_PARSE_NOWARNING);
  if (document == (xmlDocPtr) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "CorruptImageProfile","`%s' (XMP)",image->filename);
      return(MagickFalse);
    }
  xmlFreeDoc(document);
  return(MagickTrue);
}
#else
static MagickBooleanType ValidateXMPProfile(Image *image,
  const StringInfo *profile,ExceptionInfo *exception)
{
  /*
    Stub used when ImageMagick is built without the XML delegate: XMP
    profiles cannot be validated, so warn and reject them.
  */
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateWarning,
    "DelegateLibrarySupportNotBuiltIn","'%s' (XML)",image->filename);
  return(MagickFalse);
}
#endif
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  /*
    Clone, repair, validate and install a named profile on the image.
    `recursive` is MagickTrue when called while unpacking an 8BIM block,
    which suppresses writing the profile back into the 8BIM wrapper
    (otherwise SetImageProfileInternal and WriteTo8BimProfile would
    ping-pong).
  */
  char
    key[MagickPathExtent];

  MagickBooleanType
    status;

  StringInfo
    *clone_profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  clone_profile=CloneStringInfo(profile);
  PatchCorruptProfile(name,clone_profile);
  if ((LocaleCompare(name,"xmp") == 0) &&
      (ValidateXMPProfile(image,clone_profile,exception) == MagickFalse))
    {
      /*
        Invalid XMP is silently dropped (MagickTrue) rather than failing
        the whole operation; the validator has already queued a warning.
      */
      clone_profile=DestroyStringInfo(clone_profile);
      return(MagickTrue);
    }
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  /* profile names are stored lower-case for case-insensitive lookup */
  (void) CopyMagickString(key,name,MagickPathExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),clone_profile);
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,clone_profile,exception);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,clone_profile);
    }
  return(status);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  /*
    Public entry point: install a named profile, non-recursively (the
    8BIM wrapper is kept in sync).
  */
  MagickBooleanType
    status;

  status=SetImageProfileInternal(image,name,profile,MagickFalse,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  /*
    Consume one byte from the bounded buffer, advancing the cursor and
    shrinking the remaining length; returns EOF when exhausted.
  */
  int
    c;

  if (*length == 0)
    return(EOF);
  c=(int) **p;
  (*p)++;
  (*length)--;
  return(c);
}
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  /*
    Decode two bytes with the requested endianness; the unsigned bit
    pattern is reinterpreted as signed through a union (well-defined
    type punning in C).
  */
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    value=(unsigned short) (((unsigned short) buffer[1] << 8) |
      (unsigned short) buffer[0]);
  else
    value=(unsigned short) (((unsigned short) buffer[0] << 8) |
      (unsigned short) buffer[1]);
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  /*
    Decode four bytes with the requested endianness; the unsigned bit
    pattern is reinterpreted as signed through a union (well-defined
    type punning in C).
  */
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    value=((unsigned int) buffer[3] << 24) |
          ((unsigned int) buffer[2] << 16) |
          ((unsigned int) buffer[1] << 8) |
          ((unsigned int) buffer[0]);
  else
    value=((unsigned int) buffer[0] << 24) |
          ((unsigned int) buffer[1] << 16) |
          ((unsigned int) buffer[2] << 8) |
          ((unsigned int) buffer[3]);
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  /*
    Bounds-checked big-endian 32-bit read: returns 0 (without moving
    the cursor) when fewer than four bytes remain.
  */
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  *p+=4;
  (*length)-=4;
  return(value);
}
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  /*
    Bounds-checked big-endian 16-bit read: returns 0 (without moving
    the cursor) when fewer than two bytes remain.
  */
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  *p+=2;
  (*length)-=2;
  return(value);
}
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  /*
    Encode the low 32 bits of value at p with the requested endianness
    (staged through a local buffer, then copied in one memcpy).
  */
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 24);
      buffer[1]=(unsigned char) (value >> 16);
      buffer[2]=(unsigned char) (value >> 8);
      buffer[3]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,4);
}
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  /*
    Encode a 16-bit value at p with the requested endianness (staged
    through a local buffer, then copied in one memcpy).
  */
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 8);
      buffer[1]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,2);
}
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  /*
    Scan the 8BIM profile for the resolution resource (id 0x03ED) and
    overwrite it in place with the image's current resolution and units
    so the embedded metadata matches the image properties.
  */
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /*
      Resynchronize on the "8BIM" signature one byte at a time.
    */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    /* Pascal-style resource name: length byte then `count` bytes */
    count=(ssize_t) ReadProfileByte(&p,&length);
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;
    length-=count;
    /*
      NOTE(review): this pad check tests the DATA byte at p, whereas
      Update8BIMClipPath pads on (count & 0x01) == 0 (name field padded
      to an even total).  Looks inconsistent -- confirm against the
      Photoshop resource-block spec before changing.
    */
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /*
          Resolution resource: two 16.16 fixed-point values (stored as
          pixels per inch) plus a units short after each.
        */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
/*
  Walk the EXIF/TIFF IFD tree inside `profile` and overwrite, in place,
  the resolution (0x011a/0x011b), orientation (0x0112) and resolution
  unit (0x0128) entries with the image's current properties.  Returns
  MagickFalse when the profile is too short or structurally invalid.
*/
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005

  /* pending IFD position saved while descending into a sub-IFD */
  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* byte width of each TIFF value format (index 0 unused) */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /*
        No bare TIFF header at the start: scan forward for an
        "Exif\0\0" signature and parse from just past it.
      */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;    /* "II": little-endian TIFF */
  else
    if (id == 0x4D4D)
      endian=MSBEndian;  /* "MM": big-endian TIFF */
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);  /* TIFF magic number 42 */
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /*
    Track already-visited entry addresses so cyclic IFD offsets in a
    hostile profile cannot loop forever.
  */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        /* pop the next pending directory */
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));  /* 12-byte entries */
      if (q > (exif+length-12))
        break; /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* entry already visited: offset cycle */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break; /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break; /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value stored inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue; /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution (rational: numerator, then denominator=1) */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation (short normally, long in some writers) */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF value is ImageMagick units + 1 */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /*
            Descend into the sub-IFD: push the rest of this IFD, the
            sub-IFD, and any chained IFD whose offset follows the
            entry table.
          */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  /*
    Push the image's resolution/units/orientation into its embedded
    8BIM and EXIF profiles; MagickFalse if either sync fails.
  */
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if ((profile != (StringInfo *) NULL) &&
      (Sync8BimProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if ((profile != (StringInfo *) NULL) &&
      (SyncExifProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  return(status);
}
static void UpdateClipPath(unsigned char *blob,size_t length,
  const size_t old_columns,const size_t old_rows,
  const RectangleInfo *new_geometry)
{
  /*
    Rewrite a Photoshop clip-path resource in place after the image has
    been cropped/resized: every knot coordinate (26.6-style 16.16 fixed
    point, fraction of canvas) is rescaled from the old canvas to
    new_geometry.  Records are 26 bytes: a 2-byte selector then 24
    bytes of payload.
  */
  ssize_t
    i;

  ssize_t
    knot_count,
    selector;

  knot_count=0;
  while (length != 0)
  {
    selector=(ssize_t) ReadProfileMSBShort(&blob,&length);
    switch (selector)
    {
      case 0:
      case 3:
      {
        if (knot_count != 0)
          {
            /* already inside a subpath: skip unexpected length record */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Expected subpath length record.
        */
        knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length);
        blob+=22;
        length-=MagickMin(22,(ssize_t) length);
        break;
      }
      case 1:
      case 2:
      case 4:
      case 5:
      {
        if (knot_count == 0)
          {
            /*
              Unexpected subpath knot.
            */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Add sub-path knot
        */
        for (i=0; i < 3; i++)
        {
          double
            x,
            y;

          signed int
            xx,
            yy;

          /*
            Each knot point is (y,x); read the old fixed-point value,
            rescale into the new geometry, then write it back over the
            4 bytes just consumed (blob-4).
          */
          y=(double) ReadProfileMSBLong(&blob,&length);
          y=y*old_rows/4096.0/4096.0;
          y-=new_geometry->y;
          yy=(signed int) ((y*4096*4096)/new_geometry->height);
          WriteProfileLong(MSBEndian,(size_t) yy,blob-4);
          x=(double) ReadProfileMSBLong(&blob,&length);
          x=x*old_columns/4096.0/4096.0;
          x-=new_geometry->x;
          xx=(signed int) ((x*4096*4096)/new_geometry->width);
          WriteProfileLong(MSBEndian,(size_t) xx,blob-4);
        }
        knot_count--;
        break;
      }
      case 6:
      case 7:
      case 8:
      default:
      {
        /* records we do not rescale: skip the 24-byte payload */
        blob+=24;
        length-=MagickMin(24,(ssize_t) length);
        break;
      }
    }
  }
}
MagickPrivate void Update8BIMClipPath(const Image *image,
  const size_t old_columns,const size_t old_rows,
  const RectangleInfo *new_geometry)
{
  /*
    Find every clip-path resource (ids 2000-2998) in the image's 8bim
    profile and rescale its knots from the old canvas size to
    new_geometry via UpdateClipPath.  No-op when there is no 8bim
    profile.
  */
  const StringInfo
    *profile;

  size_t
    length;

  ssize_t
    count,
    id;

  unsigned char
    *info;

  assert(image != (Image *) NULL);
  assert(new_geometry != (RectangleInfo *) NULL);
  profile=GetImageProfile(image,"8bim");
  if (profile == (StringInfo *) NULL)
    return;
  length=GetStringInfoLength(profile);
  info=GetStringInfoDatum(profile);
  while (length > 0)
  {
    /*
      Resynchronize on the "8BIM" signature one byte at a time.
    */
    if (ReadProfileByte(&info,&length) != (unsigned char) '8')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'B')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'I')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'M')
      continue;
    id=(ssize_t) ReadProfileMSBShort(&info,&length);
    /* Pascal-style resource name: length byte then `count` bytes */
    count=(ssize_t) ReadProfileByte(&info,&length);
    if ((count != 0) && ((size_t) count <= length))
      {
        info+=count;
        length-=count;
      }
    /* name field (length byte + name) is padded to an even total */
    if ((count & 0x01) == 0)
      (void) ReadProfileByte(&info,&length);
    count=(ssize_t) ReadProfileMSBLong(&info,&length);
    if ((count < 0) || ((size_t) count > length))
      {
        length=0;
        continue;
      }
    /* Photoshop path resources occupy ids 2000-2998 */
    if ((id > 1999) && (id < 2999))
      UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry);
    info+=count;
    length-=MagickMin(count,(ssize_t) length);
  }
}
|
GB_binop__second_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__second_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_int8)
// A*D function (colscale): GB (_AxD__second_int8)
// D*A function (rowscale): GB (_DxB__second_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__second_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__second_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_int8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int8_t
// A type: int8_t
// A pattern? 1
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = bij
// Type and operator bindings consumed by the #included templates below.
// Auto-generated for the SECOND (z = y) operator over int8_t; A's values
// are never read (A is pattern-only), so GB_GETA expands to nothing.
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: SECOND ignores x and returns y
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (set via GraphBLAS compile-time operator/type pruning in GB_control.h)
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT8 || GxB_NO_SECOND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: the generator emits this dense-ewise3-accum kernel only for
// accumulable ops; SECOND is not one of them, so no function is built.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B where all three matrices are dense; the included template
    // supplies the full loop body using the GB_* macros defined above.
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B where C is dense and B is sparse; returns GrB_NO_VALUE when
    // this kernel is compiled out (GB_DISABLE) so the caller falls back
    // to the generic method.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__second_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b (scalar) where C is dense.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached: the inner block returns first (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D, scaling each column of A by the corresponding diagonal
    // entry of D, using the SECOND op (so C takes D's values).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    // C = D*B, scaling each row of B by the corresponding diagonal entry
    // of D, using the SECOND op (so C takes B's values).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__second_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd / eWiseUnion: C=A+B with optional mask.  For eWiseUnion
    // the alpha/beta scalars substitute for entries present in only one
    // of A or B; for eWiseAdd they are left unread.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__second_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult method 08: C=A.*B (optionally masked) where C is
    // sparse or hypersparse.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated eWiseMult kernel (method 02) for SECOND on int8_t: A is
// sparse/hypersparse, B is bitmap/full.  The template is included once or
// twice depending on whether the operator has a flipped form.
GrB_Info GB (_AemultB_02__second_int8)
(
    GrB_Matrix C,                   // output matrix
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use the mask structurally only
    const bool Mask_comp,           // if true, use the complemented mask !M
    const GrB_Matrix A,             // sparse/hypersparse input
    const GrB_Matrix B,             // bitmap/full input
    const bool flipxy,              // if true, compute z = op(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to a generic method
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated eWiseMult kernel (method 04) for SECOND on int8_t: the mask M is
// sparse/hypersparse while both A and B are bitmap/full.  Body supplied by
// the included template.
GrB_Info GB (_AemultB_04__second_int8)
(
    GrB_Matrix C,                   // output matrix
    const GrB_Matrix M,             // sparse/hypersparse mask
    const bool Mask_struct,         // if true, use the mask structurally only
    const GrB_Matrix A,             // bitmap/full input
    const GrB_Matrix B,             // bitmap/full input
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to a generic method
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated eWiseMult kernel for SECOND on int8_t where the result C is held
// as a bitmap.  Body supplied by the included template.
GrB_Info GB (_AemultB_bitmap__second_int8)
(
    GrB_Matrix C,                   // output matrix (bitmap)
    const int ewise_method,         // which emult sub-method to apply
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use the mask structurally only
    const bool Mask_comp,           // if true, use the complemented mask !M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to a generic method
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
// Disabled generated kernel (compiled out with #if 0): apply a binary
// operator with the first argument bound to a scalar, Cx = op(x, Bx).
// For SECOND the result is simply the B entry, so each present entry bij
// is copied to Cx[p] and the bound scalar x is unused.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // bound scalar (unused by SECOND)
    const GB_void *Bx_input,    // B's values
    const int8_t *restrict Bb,  // B's bitmap (NULL if B is full)
    int64_t bnz,                // number of entries to process
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = bij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
// Disabled generated kernel (compiled out with #if 0): apply a binary
// operator with the second argument bound to a scalar, Cx = op(Ax, y).
// For SECOND the result is the bound scalar itself, so each present entry
// is set to y; the "; ;" below is where the (elided) read of aij would be.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // A's values (unused by SECOND)
    const GB_void *y_input,     // bound scalar
    const int8_t *restrict Ab,  // A's bitmap (NULL if A is full)
    int64_t anz,                // number of entries to process
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        ; ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled generated kernel (compiled out with #if 0): C = op(x, A'),
// transpose A while applying the operator with x bound as the first
// argument.  For SECOND the cast macro copies aij through unchanged.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = aij ;                             \
}
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,     // bound scalar (unused by SECOND)
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template inclusion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled generated kernel (compiled out with #if 0): C = op(A', y),
// transpose A while applying the operator with y bound as the second
// argument.  For SECOND each output entry is just the bound scalar y;
// the "; ;" in the cast macro is the elided read of aij.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    ; ;                                         \
    Cx [pC] = y ;                               \
}
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // bound scalar
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
#endif
|
3d25pt_var.c | /*
 * Order-4, 3D 25-point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Y is normalized in place (borrowing/carrying between the seconds and
 * microseconds fields) so that the subtraction never underflows tv_usec.
 * NOTE: the value pointed to by Y may be modified by this call.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds so x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds (gap over one second) back into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
/* Driver for the order-4 (25-point) 3D stencil with axis-symmetric variable
 * coefficients: allocates the grids, initializes them with reproducible
 * pseudo-random data, times TESTS runs of the stencil, and reports the best.
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were uninitialized when too few command-line arguments were
 *    given (undefined behavior); they now have defaults.
 *  - Initialization loops started at 1 and only filled A[0], leaving halo
 *    plane 0 and all of A[1] uninitialized even though the stencil reads
 *    them; both planes are now fully initialized from index 0.
 *  - `min(...)` was undefined (the file defines a MIN macro); use MIN.
 *  - The top-level A/coef pointer arrays and tile_size were leaked.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;

    /* Default problem size and time steps; overridden by argv. */
    Nx = Ny = Nz = 64 + 8;
    Nt = 100;
    if (argc > 3) {
        /* +8 accounts for the 4-deep halo on each side of every axis. */
        Nx = atoi(argv[1]) + 8;
        Ny = atoi(argv[2]) + 8;
        Nz = atoi(argv[3]) + 8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Allocate the two time planes A[0], A[1], each Nz x Ny x Nx. */
    double ****A = (double ****) malloc(sizeof(double ***) * 2);
    for (m = 0; m < 2; m++) {
        A[m] = (double ***) malloc(sizeof(double **) * Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = (double **) malloc(sizeof(double *) * Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = (double *) malloc(sizeof(double) * Nx);
            }
        }
    }

    /* 13 coefficient arrays (1 center + 12 axis pairs), same shape as A[m]. */
    double ****coef = (double ****) malloc(sizeof(double ***) * 13);
    for (m = 0; m < 13; m++) {
        coef[m] = (double ***) malloc(sizeof(double **) * Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = (double **) malloc(sizeof(double *) * Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = (double *) malloc(sizeof(double) * Nx);
            }
        }
    }

    /* Tile size list, terminated by -1; rewritten here before
     * source-to-source transformations. */
    int *tile_size = (int *) malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 32;
    tile_size[3] = 256;
    tile_size[4] = -1;

    /* Timekeeping. */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;

    /* Reproducible pseudo-random initialization of both time planes and all
     * coefficient arrays, covering every index including the halo. */
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* Time TESTS runs of the stencil and keep the fastest. */
    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        /* serial execution of the order-4 axis-symmetric stencil */
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] =
                            coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
                            coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
                            coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
                            coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
                            coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
                            coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
                            coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
                            coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
                            coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
                            coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
                            coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
                            coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
                            coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: the original called lowercase min(), which is undefined;
         * the MIN macro defined at the top of this file is intended. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free all allocations, including the top-level pointer arrays and the
     * tile-size list (the original leaked these). */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
omp_ex_14.c | #include <stdio.h>
#include <omp.h>
/*
MIT License
Copyright (c) 2019 NOUREDDINE DAGHBOUDJ
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/* Demonstrates OpenMP per-thread copies of a variable.
 *
 * Fixes vs. the original:
 *  - private(a) gives each thread an UNINITIALIZED copy, so `a += ...`
 *    read an indeterminate value (undefined behavior).  firstprivate(a)
 *    initializes each copy from the value outside the region, which is
 *    what the Before/Inside/After prints are meant to illustrate.
 *  - `a` is unsigned int, so the format specifier is %u, not %i.
 */
int main()
{
    unsigned int a = 90;
    printf("Before a = %u\n", a);

    /* Each thread gets its own copy of `a`, initialized to 90. */
    #pragma omp parallel firstprivate(a)
    {
        a += 10 + omp_get_thread_num();
        printf("Inside a = %u\n", a);
    }

    /* The outer `a` is unaffected by the threads' private copies. */
    printf("After a = %u\n", a);
    return 0;
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type.
  template <typename T>
  const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::DynTypedNode
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }

private:
  /// Only the match-tree builder may construct BoundNodes instances.
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  /// The underlying id -> node map this class wraps.
  internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXBaseSpecifierMatcher = internal::Matcher<CXXBaseSpecifier>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>;
using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile())
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Map the node's begin location to its macro-expansion location and test
  // whether that location lies in the main file.
  auto &SM = Finder->getASTContext().getSourceManager();
  const auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  return SM.isInMainFile(ExpansionLoc);
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader())
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // An invalid expansion location cannot be in a system header.
  auto &SM = Finder->getASTContext().getSourceManager();
  const auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  return ExpansionLoc.isValid() && SM.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*"))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
                              AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt,
                                                              TypeLoc),
                              RegExp) {
  // Resolve the node's expansion location to a file entry; match its name
  // against the provided regular expression.
  auto &SM = Finder->getASTContext().getSourceManager();
  const auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  if (ExpansionLoc.isInvalid())
    return false;
  const auto *Entry = SM.getFileEntryForID(SM.getFileID(ExpansionLoc));
  if (!Entry)
    return false;
  return RegExp->match(Entry->getName());
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, MacroName) {
  // The node matches only when its begin and end locations both come from
  // the same expansion instance of the named macro.
  auto &Ctx = Finder->getASTContext();
  const llvm::Optional<SourceLocation> Begin =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Ctx);
  if (!Begin)
    return false;
  const llvm::Optional<SourceLocation> End =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Ctx);
  return End && *Begin == *End;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches decomposition-declarations.
///
/// Examples matches the declaration node with \c foo and \c bar, but not
/// \c number.
/// (matcher = declStmt(has(decompositionDecl())))
///
/// \code
/// int number = 42;
/// auto [foo, bar] = std::make_pair{42, 42};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl>
decompositionDecl;
/// Matches binding declarations
/// Example matches \c foo and \c bar
/// (matcher = bindingDecl()
///
/// \code
/// auto [foo, bar] = std::make_pair{42, 42};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BindingDecl>
bindingDecl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches class bases.
///
/// Examples matches \c public virtual B.
/// \code
/// class B {};
/// class C : public virtual B {};
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXBaseSpecifier> cxxBaseSpecifier;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template arguments (with location info).
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgumentLoc()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc>
templateArgumentLoc;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches template template parameter declarations.
///
/// Given
/// \code
/// template <template <typename> class Z, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'Z', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
TemplateTemplateParmDecl>
templateTemplateParmDecl;
/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a; // fieldDecl(isPublic()) matches 'a'
/// protected: int b;
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived1 : public Base {}; // matches 'Base'
/// struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  // Matches when the declaration / base specifier has public access.
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_public;
}
/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b; // fieldDecl(isProtected()) matches 'b'
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  // Matches when the declaration / base specifier has protected access.
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_protected;
}
/// Matches private C++ declarations and C++ base specifiers that specify private
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c; // fieldDecl(isPrivate()) matches 'c'
/// };
/// \endcode
///
/// \code
/// struct Base {};
/// struct Derived1 : private Base {}; // matches 'Base'
/// class Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPrivate,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  // Matches when the declaration / base specifier has private access.
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  // Delegates directly to FieldDecl::isBitField().
  const bool IsBitField = Node.isBitField();
  return IsBitField;
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Non-bit-field members never match, regardless of Width.
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fields without an in-class initializer never match.
  const Expr *Init = Node.getInClassInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); }
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
///   template<typename T> class A {}; // #1
///   template<> class A<int> {};      // #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
///   matches '#2' with classTemplateDecl() matching the class template
///   declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  if (const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate())
    return InnerMatcher.matches(*Specialized, Finder, Builder);
  return false;
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
///
///   template<typename T> void f() {};
///   void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("int"))))
///   matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder) != List.end();
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
///   void foo()
///   {
///       int i = 3.0;
///   }
/// \endcode
/// The matcher
/// \code
///   traverse(TK_IgnoreUnlessSpelledInSource,
///     varDecl(hasInitializer(floatLiteral().bind("init")))
///   )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  // Wrap InnerMatcher so that it (and everything nested inside it) is run
  // under traversal kind TK, then convert the wrapper back to a Matcher<T>.
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             new internal::TraversalMatcher<T>(TK, InnerMatcher),
             InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}
/// Overload of \c traverse for \c BindableMatcher, so that the returned
/// matcher stays bindable with \c .bind().
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
  return internal::BindableMatcher<T>(
      internal::DynTypedMatcher::constructRestrictedWrapper(
          new internal::TraversalMatcher<T>(TK, InnerMatcher),
          InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>());
}
/// Overload of \c traverse for variadic-operator matchers (e.g. \c allOf,
/// \c anyOf): the traversal kind is stored alongside the operator matcher
/// and applied when the wrapper is converted to a typed matcher.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
      TK, InnerMatcher);
}
/// Overload of \c traverse for argument-adapting matchers (e.g. \c has,
/// \c hasDescendant): pairs the adaptor with the traversal kind until it is
/// instantiated for a concrete node type.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
                               ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>>(TK, InnerMatcher);
}
/// Overload of \c traverse for polymorphic matchers: the traversal kind is
/// stored alongside the matcher and applied when it is converted to a typed
/// matcher for one of its supported node types.
template <template <typename T, typename... P> class MatcherT, typename... P,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>
traverse(TraversalKind TK,
         const internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>
             &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>(TK,
                                                                  InnerMatcher);
}
/// Overload of \c traverse for \c mapAnyOf helpers: applies the traversal
/// kind to the matcher produced by \c with().
template <typename... T>
internal::Matcher<typename internal::GetClade<T...>::Type>
traverse(TraversalKind TK, const internal::MapAnyOfHelper<T...> &InnerMatcher) {
  return traverse(TK, InnerMatcher.with());
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
///   class C {};
///   C a = C();
///   C b;
///   C c = b;
/// \endcode
/// The matchers
/// \code
///   varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
///   varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
///   int arr[5];
///   int a = 0;
///   char b = 0;
///   const int c = a;
///   int *d = arr;
///   long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
///   varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
///   varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
///   varDecl(hasInitializer(integerLiteral()))
///   varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
///   int a = 0;
///   char b = (0);
///   void* c = reinterpret_cast<char*>(0);
///   char d = char(0);
/// \endcode
/// The matcher
///   varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
///   varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
///   int arr[5];
///   int a = 0;
///   char b = (0);
///   const int c = a;
///   int *d = (arr);
///   long e = ((long) 0l);
/// \endcode
/// The matchers
///   varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
///   varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
///   varDecl(hasInitializer(integerLiteral()))
///   varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
///   void (*fp)(void);
/// \endcode
/// The matcher
/// \code
///   varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // QualType::IgnoreParens returns a value, not a pointer; no null check.
  const QualType Stripped = Node.IgnoreParens();
  return InnerMatcher.matches(Stripped, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
///   const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
///   implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
///   template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
///   template<typename T>
///   void add(T x, int y) {
///     x + y;
///   }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  return Node.isTypeDependent();
}
/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
///   template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) {
  return Node.isValueDependent();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T, typename U> class A {};
///   A<bool, int> b;
///   A<int, bool> c;
///
///   template<typename T> void f() {}
///   void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
///     1, refersToType(asString("int"))))
///   matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  const ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  // Out-of-range indices never match.
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
///   template<typename T> struct C {};
///   C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
///   matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  return N == internal::getTemplateSpecializationArgs(Node).size();
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
///   struct X {};
///   template<typename T> struct A {};
///   A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(class(hasName("X")))))
///   matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Non-type arguments (templates, expressions, integrals, ...) never match.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
///   template<template <typename> class S> class X {};
///   template<typename T> class Y {};
///   X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToTemplate(templateName())))
///   matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
///   struct B { int next; };
///   template<int(B::*next_ptr)> struct A {};
///   A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToDeclaration(fieldDecl(hasName("next")))))
///   matches the specialization \c A<&B::next> with \c fieldDecl(...)
///   matching \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
///   struct B { int next; };
///   template<int(B::*next_ptr)> struct A {};
///   A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
///   isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
///   matches the specialization \c A<&B::next> with \c fieldDecl(...)
///   matching \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(isIntegral()))
///   matches the implicit instantiation of C in C<42>
///   with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  return TemplateArgument::Integral == Node.getKind();
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
///   matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(equalsIntegralValue("42")))
///   matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Compare against the base-10 rendering of the APSInt argument.
  return Value == Node.getAsIntegral().toString(10);
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
///   @autoreleasepool {
///     int x = 0;
///   }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the \@autoreleasepool block that
/// contains the declaration of "x".
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
    ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
///   enum X { A, B, C };
///   void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches \c Foo::Foo() and \c Foo::Foo(int)
/// \code
///   class Foo {
///    public:
///     Foo();
///     Foo(int);
///     int DoSomething();
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
    cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches \c Foo::~Foo()
/// \code
///   class Foo {
///    public:
///     virtual ~Foo();
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
    cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
///   enum X {
///     A, B, C
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
///   enum X {
///     A, B, C
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
    enumConstantDecl;
/// Matches tag declarations (class, struct, union and enum declarations).
///
/// Example matches X, Z, U, S, E
/// \code
///   class X;
///   template<class T> class Z {};
///   struct S {};
///   union U {};
///   enum E {
///     A, B, C
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
///   class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
    cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
///   class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
    cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
///   template<typename T>
///   class X { X(int); };
///   X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
    cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
///   int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations (non-static data members).
///
/// Given
/// \code
///   class X { int m; };
/// \endcode
/// fieldDecl()
///   matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
///   struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
///   matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
    indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
///   void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
    functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
///   template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
    functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
///   class X { friend void foo(); };
/// \endcode
/// friendDecl()
///   matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
///   { ++a; }
/// \endcode
/// stmt()
///   matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
///   int a;
/// \endcode
/// declStmt()
///   matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     int a; static int b;
///   };
/// \endcode
/// memberExpr()
///   matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
///   struct X {
///     template <class T> void f();
///     void g();
///   };
///   template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
///   matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
    unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
///   template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
///   matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXDependentScopeMemberExpr>
    cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
///   X x;
///   x.y();
///   y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
///   namespace NS {
///     struct X {};
///     void y(X);
///   }
///
///   void y(...);
///
///   void test() {
///     NS::X x;
///     y(x);     // Matches
///     NS::y(x); // Doesn't match
///     y(42);    // Doesn't match
///     using NS::y;
///     y(x);     // Found by both unqualified lookup and ADL, doesn't match
///   }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }
/// Matches lambda expressions.
///
/// Example matches \c [&](){return 5;}
/// \code
///   [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
///   X x;
///   x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
    cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
///   [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
    objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
///   @interface Foo
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
    objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
///   @implementation Foo
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
    objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
///   @protocol FooDelegate
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
    objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
///   @interface Foo (Additions)
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
    objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
///   @implementation Foo (Additions)
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
    objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
///   @interface Foo
///   - (void)method;
///   @end
///
///   @implementation Foo
///   - (void)method {}
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
    objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
///   myFunc(^(int p) {
///     printf("%d", p);
///   })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
    blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
///   @implementation Foo {
///     BOOL _enabled;
///   }
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
    objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
///   @interface Foo
///   @property BOOL enabled;
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
    objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
///   @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
    objcThrowStmt;
/// Matches Objective-C \@try statements.
///
/// Example matches \@try
/// \code
///   @try {}
///   @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
    objcTryStmt;
/// Matches Objective-C \@catch statements.
///
/// Example matches \@catch
/// \code
///   @try {}
///   @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
    objcCatchStmt;
/// Matches Objective-C \@finally statements.
///
/// Example matches \@finally
/// \code
///   @try {}
///   @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
    objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches \c std::string()
/// \code
///   const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
    exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
///   int a[] = { 1, 2 };
///   struct B { int x, y; };
///   B b = { 5, 6 };
/// \endcode
/// initListExpr()
///   matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
    initListExpr;
/// Matches the syntactic form of init list expressions
/// (if the expression has one).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Semantic-only init lists (no syntactic form) never match.
  if (const Expr *SyntacticForm = Node.getSyntacticForm())
    return InnerMatcher.matches(*SyntacticForm, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
///   std::vector<int> a({ 1, 2, 3 });
///   std::vector<int> b = { 4, 5 };
///   int c[] = { 6, 7 };
///   std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
///   matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXStdInitializerListExpr>
    cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
///   matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
    implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be found in template declarations.
///
/// Given
/// \code
///   template<typename T> class X {
///     void f() {
///       X x(*this);
///       int a = 0, b = 1; int i = (a, b);
///     }
///   };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
    parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
///   template <int N>
///   struct A { static const int n = N; };
///   struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
///   matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   SubstNonTypeTemplateParmExpr>
    substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
///   namespace X { int x; }
///   using X::x;
/// \endcode
/// usingDecl()
///   matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
///   namespace X { int x; }
///   using namespace X;
/// \endcode
/// usingDirectiveDecl()
///   matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
    usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
///   template<typename T>
///   T foo() { T a; return a; }
///   template<typename T>
///   void bar() {
///     foo<T>();
///   }
/// \endcode
/// unresolvedLookupExpr()
///   matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
    unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
///   template<typename X>
///   class C : private X {
///     using X::x;
///   };
/// \endcode
/// unresolvedUsingValueDecl()
///   matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   UnresolvedUsingValueDecl>
    unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
///   template <typename T>
///   struct Base { typedef T Foo; };
///
///   template<typename T>
///   struct S : private Base<T> {
///     using typename Base<T>::Foo;
///   };
/// \endcode
/// unresolvedUsingTypenameDecl()
///   matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   UnresolvedUsingTypenameDecl>
    unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
///     (matcher = constantExpr())
/// \code
///   switch (a) {
///   case 37: break;
///   }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
    constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
///   int foo() { return 1; }
///   int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
///     (matcher = cxxConstructExpr())
/// \code
///   void f(const string &a, const string &b);
///   char *ptr;
///   int n;
///   f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
    cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
///     (matcher = cxxUnresolvedConstructExpr())
/// \code
///   template <typename T>
///   T f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXUnresolvedConstructExpr>
    cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
///     (matcher = cxxThisExpr())
/// \code
///   struct foo {
///     int i;
///     int f() { return i; }
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
    cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
///     (matcher = cxxBindTemporaryExpr())
/// \code
///   FunctionTakesString(GetStringByValue());
///   FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
    cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
///   struct T {void func();};
///   T f();
///   void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
///   T u(f());
///   g(f());
///   f().func();
/// \endcode
/// but does not match
/// \code
///   f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   MaterializeTemporaryExpr>
    materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
/// See also the binaryOperation() matcher for more-general matching of binary
/// uses of this AST node.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches rewritten binary operators
///
/// Example matches use of "<":
/// \code
/// #include <compare>
/// struct HasSpaceshipMem {
/// int a;
/// constexpr auto operator<=>(const HasSpaceshipMem&) const = default;
/// };
/// void compare() {
/// HasSpaceshipMem hs1, hs2;
/// if (hs1 < hs2)
/// return;
/// }
/// \endcode
/// See also the binaryOperation() matcher for more-general matching
/// of this AST node.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXRewrittenBinaryOperator>
cxxRewrittenBinaryOperator;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for-statement may omit its increment; only run the inner matcher
  // when one is present.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init statement of a for-loop is optional; guard against null
  // before delegating to the inner matcher.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // The loop variable may be null; match only when it exists.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization statement of a for loop.
///
/// Example:
/// forStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // The range initializer may be null; match only when it exists.
  if (const Expr *RangeInit = Node.getRangeInit())
    return InnerMatcher.matches(*RangeInit, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches co_return statements.
///
/// Given
/// \code
/// while (true) { co_return; }
/// \endcode
/// coreturnStmt()
/// matches 'co_return'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoreturnStmt>
coreturnStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed point literals
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches co_await expressions.
///
/// Given
/// \code
/// co_await 1;
/// \endcode
/// coawaitExpr()
/// matches 'co_await 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoawaitExpr>
coawaitExpr;
/// Matches co_await expressions where the type of the promise is dependent
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr>
dependentCoawaitExpr;
/// Matches co_yield expressions.
///
/// Given
/// \code
/// co_yield 1;
/// \endcode
/// coyieldExpr()
/// matches 'co_yield 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr>
coyieldExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches C11 _Generic expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr>
genericSelectionExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
/// See also the binaryOperation() matcher for more-general matching.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
/// staticAssertExpr()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // Compare the number of designators in the initializer against the
  // expected count.
  const unsigned DesignatorCount = Node.size();
  return DesignatorCount == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information which may or may not present about a main
/// matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding for even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches any of the \p NodeMatchers with InnerMatchers nested within
///
/// Given
/// \code
/// if (true);
/// for (; true; );
/// \endcode
/// with the matcher
/// \code
/// mapAnyOf(ifStmt, forStmt).with(
/// hasCondition(cxxBoolLiteralExpr(equals(true)))
/// ).bind("trueCond")
/// \endcode
/// matches the \c if and the \c for. It is equivalent to:
/// \code
/// auto trueCond = hasCondition(cxxBoolLiteralExpr(equals(true)));
/// anyOf(
/// ifStmt(trueCond).bind("trueCond"),
/// forStmt(trueCond).bind("trueCond")
/// );
/// \endcode
///
/// The with() chain-call accepts zero or more matchers which are combined
/// as-if with allOf() in each of the node matchers.
/// Usable as: Any Matcher
template <typename T, typename... U>
auto mapAnyOf(internal::VariadicDynCastAllOfMatcher<T, U> const &...) {
  // The arguments exist only so template argument deduction can pick up the
  // node types U...; their values are deliberately unused (parameters are
  // unnamed). All real work happens in the returned MapAnyOfHelper.
  return internal::MapAnyOfHelper<U...>();
}
/// Matches nodes which can be used with binary operators.
///
/// The code
/// \code
/// var1 != var2;
/// \endcode
/// might be represented in the clang AST as a binaryOperator, a
/// cxxOperatorCallExpr or a cxxRewrittenBinaryOperator, depending on
///
/// * whether the types of var1 and var2 are fundamental (binaryOperator) or at
/// least one is a class type (cxxOperatorCallExpr)
/// * whether the code appears in a template declaration, if at least one of the
/// vars is a dependent-type (binaryOperator)
/// * whether the code relies on a rewritten binary operator, such as a
/// spaceship operator or an inverted equality operator
/// (cxxRewrittenBinaryOperator)
///
/// This matcher elides details in places where the matchers for the nodes are
/// compatible.
///
/// Given
/// \code
/// binaryOperation(
/// hasOperatorName("!="),
/// hasLHS(expr().bind("lhs")),
/// hasRHS(expr().bind("rhs"))
/// )
/// \endcode
/// matches each use of "!=" in:
/// \code
/// struct S{
/// bool operator!=(const S&) const;
/// };
///
/// void foo()
/// {
/// 1 != 2;
/// S() != S();
/// }
///
/// template<typename T>
/// void templ()
/// {
/// 1 != 2;
/// T() != S();
/// }
/// struct HasOpEq
/// {
/// bool operator==(const HasOpEq &) const;
/// };
///
/// void inverse()
/// {
/// HasOpEq s1;
/// HasOpEq s2;
/// if (s1 != s2)
/// return;
/// }
///
/// struct HasSpaceship
/// {
/// bool operator<=>(const HasOpEq &) const;
/// };
///
/// void use_spaceship()
/// {
/// HasSpaceship s1;
/// HasSpaceship s2;
/// if (s1 != s2)
/// return;
/// }
/// \endcode
extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator>
binaryOperation;
/// Matches function calls and constructor calls
///
/// Because CallExpr and CXXConstructExpr do not share a common
/// base class with API accessing arguments etc, AST Matchers for code
/// which should match both are typically duplicated. This matcher
/// removes the need for duplication.
///
/// Given code
/// \code
/// struct ConstructorTakesInt
/// {
/// ConstructorTakesInt(int i) {}
/// };
///
/// void callTakesInt(int i)
/// {
/// }
///
/// void doCall()
/// {
/// callTakesInt(42);
/// }
///
/// void doConstruct()
/// {
/// ConstructorTakesInt cti(42);
/// }
/// \endcode
///
/// The matcher
/// \code
/// invocation(hasArgument(0, integerLiteral(equals(42))))
/// \endcode
/// matches the expression in both doCall and doConstruct
extern const internal::MapAnyOfMatcher<CallExpr, CXXConstructExpr> invocation;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int"))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Run the inner matcher directly on the type of the trait's argument.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
/// int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  // Match only when the expression's trait kind equals the requested one.
  return Kind == Node.getKind();
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
/// Restricts unaryExprOrTypeTraitExpr to the alignof-family kinds
/// (UETT_AlignOf and UETT_PreferredAlignOf) before applying InnerMatcher.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  auto IsAlignOfKind =
      anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(IsAlignOfKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
/// Restricts unaryExprOrTypeTraitExpr to UETT_SizeOf before applying
/// InnerMatcher.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  auto IsSizeOf = ofKind(UETT_SizeOf);
  return stmt(unaryExprOrTypeTraitExpr(allOf(IsSizeOf, InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // HasNameMatcher accepts a list of names; wrap the single name in a
  // one-element vector.
  std::vector<std::string> Names;
  Names.push_back(std::string(Name));
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  // Prepend "::" so patterns can anchor on the fully qualified name
  // (e.g. "::X" matches a top-level X).
  const std::string QualifiedName = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(QualifiedName);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcher<
    internal::HasOverloadedOperatorNameMatcher,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl),
    std::vector<std::string>>
hasOverloadedOperatorName(StringRef Name) {
  // Alias the verbose return type so the construction reads clearly.
  using ResultT = internal::PolymorphicMatcher<
      internal::HasOverloadedOperatorNameMatcher,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl),
      std::vector<std::string>>;
  return ResultT({std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcher<internal::HasOverloadedOperatorNameMatcher,
AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXOperatorCallExpr, FunctionDecl),
std::vector<std::string>>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
/// Matches template-dependent, but known, member names.
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows to match on the known name of members.
///
/// Given
/// \code
/// template <typename T>
/// struct S {
/// void mem();
/// };
/// template <typename T>
/// void x() {
/// S<T> s;
/// s.mem();
/// }
/// \endcode
/// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()`
AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) {
  // Compare the spelled name of the dependent member against N.
  const std::string MemberName = Node.getMember().getAsString();
  return MemberName == N;
}
/// Matches template-dependent, but known, member names against an already-bound
/// node
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows to match on the name of already-bound VarDecl, FieldDecl
/// and CXXMethodDecl nodes.
///
/// Given
/// \code
/// template <typename T>
/// struct S {
/// void mem();
/// };
/// template <typename T>
/// void x() {
/// S<T> s;
/// s.mem();
/// }
/// \endcode
/// The matcher
/// @code
/// \c cxxDependentScopeMemberExpr(
/// hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
/// hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has(
/// cxxMethodDecl(hasName("mem")).bind("templMem")
/// )))))
/// )))),
/// memberHasSameNameAsBoundNode("templMem")
/// )
/// @endcode
/// first matches and binds the @c mem member of the @c S template, then
/// compares its name to the usage in @c s.mem() in the @c x function template
AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode,
              std::string, BindingID) {
  auto MemberName = Node.getMember().getAsString();
  // removeBindings drops every binding set for which the predicate returns
  // true; the overall match succeeds only if at least one binding survives.
  return Builder->removeBindings(
      [this, MemberName](const BoundNodesMap &Nodes) {
        // Keep only bindings whose bound node is a field, method, or
        // variable declaration with exactly this member's name.
        const auto &BN = Nodes.getNode(this->BindingID);
        if (const auto *ND = BN.get<NamedDecl>()) {
          if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND))
            return true;
          return ND->getName() != MemberName;
        }
        return true;
      });
}
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // The polymorphic node is either an Objective-C interface...
  if (const auto *InterfaceDecl = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                          /*Directly=*/false);
  // ...or it must be a C++ struct/union/class.
  return Finder->classIsDerivedFrom(cast<CXXRecordDecl>(&Node), Base, Builder,
                                    /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match any base class.
  if (BaseName.empty())
    return false;
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *InterfaceDecl = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Matcher<ObjCInterfaceDecl>(Inner).matches(*InterfaceDecl, Finder,
                                                     Builder);
  return Matcher<CXXRecordDecl>(Inner).matches(*cast<CXXRecordDecl>(&Node),
                                               Finder, Builder);
}
/// Matches C++ classes that have a direct or indirect base matching \p
/// BaseSpecMatcher.
///
/// Example:
/// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; // matches IndirectlyDerived
/// \endcode
///
// FIXME: Refactor this and isDerivedFrom to reuse implementation.
AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Delegates the whole direct/indirect base walk to the shared helper.
  return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder);
}
/// Matches C++ classes that have a direct base matching \p BaseSpecMatcher.
///
/// Example:
/// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; // doesn't match
/// \endcode
AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Only a defined class has a base-specifier list to inspect.
  if (!Node.hasDefinition())
    return false;
  // Stop at the first direct base accepted by the inner matcher.
  for (const CXXBaseSpecifier &Base : Node.bases())
    if (BaseSpecMatcher.matches(Base, Finder, Builder))
      return true;
  return false;
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // A class matches if it is the base itself or derives from it.
  const auto Inner = anyOf(Base, isDerivedFrom(Base));
  if (const auto *InterfaceDecl = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Matcher<ObjCInterfaceDecl>(Inner).matches(*InterfaceDecl, Finder,
                                                     Builder);
  return Matcher<CXXRecordDecl>(Inner).matches(*cast<CXXRecordDecl>(&Node),
                                               Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match any class.
  if (BaseName.empty())
    return false;
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *InterfaceDecl = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Matcher<ObjCInterfaceDecl>(Inner).matches(*InterfaceDecl, Finder,
                                                     Builder);
  return Matcher<CXXRecordDecl>(Inner).matches(*cast<CXXRecordDecl>(&Node),
                                               Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // The polymorphic node is either an Objective-C interface...
  if (const auto *InterfaceDecl = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                          /*Directly=*/true);
  // ...or it must be a C++ struct/union/class.
  return Finder->classIsDerivedFrom(cast<CXXRecordDecl>(&Node), Base, Builder,
                                    /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match any base class.
  if (BaseName.empty())
    return false;
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *InterfaceDecl = dyn_cast<ObjCInterfaceDecl>(&Node))
    return Matcher<ObjCInterfaceDecl>(Inner).matches(*InterfaceDecl, Finder,
                                                     Builder);
  return Matcher<CXXRecordDecl>(Inner).matches(*cast<CXXRecordDecl>(&Node),
                                               Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Match against a scratch builder first so bindings are only committed
  // to the caller's builder when the match is actually accepted.
  BoundNodesTreeBuilder Result(*Builder);
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                            Node.method_end(), Finder, &Result);
  if (MatchIt == Node.method_end())
    return false;
  // In implicit-ignoring traversal modes, a match on an implicit method
  // does not count.
  if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit())
    return false;
  *Builder = std::move(Result);
  return true;
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // Delegates directly to CXXRecordDecl::isLambda().
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
// Declaration only; the matcher object is defined elsewhere.
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
// Declaration only; the matcher object is defined elsewhere.
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasDescendantMatcher>
    hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
// Declaration only; the matcher object is defined elsewhere.
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
    forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
// Declaration only; the matcher object is defined elsewhere.
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::ForEachDescendantMatcher>
    forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // eachOf yields a result for the node itself and for every matching
  // descendant, so "find all" covers the subtree including its root.
  return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
// Declaration only; the matcher object is defined elsewhere. The two type
// lists restrict both the outer node and the parent to the kinds the
// parent map supports.
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasParentMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
    hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
// Declaration only; the matcher object is defined elsewhere. The two type
// lists restrict both the outer node and the ancestor to the kinds the
// parent map supports.
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasAncestorMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
    hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
// Declaration only; takes exactly one argument (min 1, max 1 inner matcher).
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcher<
    internal::HasDeclarationMatcher,
    void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // Spell the verbose polymorphic matcher type once via a local alias.
  using ResultT = internal::PolymorphicMatcher<
      internal::HasDeclarationMatcher,
      void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>;
  return ResultT(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Match the underlying declaration (e.g. behind a using-declaration),
  // if one exists; otherwise there is nothing to match.
  if (const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*UnderlyingDecl, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip parentheses and implicit casts from the implicit object argument
  // before handing it to the inner matcher.
  const Expr *Object =
      Node.getImplicitObjectArgument()->IgnoreParenImpCasts();
  if (Object == nullptr)
    return false;
  return InnerMatcher.matches(*Object, Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match against the type of the message's receiver.
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // Delegates directly to ObjCMethodDecl::isClassMethod().
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // Delegates directly to ObjCMethodDecl::isInstanceMethod().
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // Delegates directly to ObjCMessageExpr::isClassMessage().
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // Delegates directly to ObjCMessageExpr::isInstanceMessage().
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only instance messages have an instance receiver; for class messages
  // this is null and the matcher never matches.
  const Expr *ReceiverNode = Node.getInstanceReceiver();
  if (ReceiverNode == nullptr)
    return false;
  return InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Exact string equality against the selector's canonical spelling.
  return BaseName == Node.getSelector().getAsString();
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
// Declaration only; the variadic matcher object is defined elsewhere.
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
  // Regex match over the selector's canonical spelling.
  std::string SelectorString = Node.getSelector().getAsString();
  return RegExp->match(SelectorString);
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  // Delegates directly to Selector::isNull().
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  // Delegates directly to Selector::isUnarySelector().
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // Delegates directly to Selector::isKeywordSelector().
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Compares the selector's argument count against the expected value.
  return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // Hand the callee expression, if present, to the inner matcher.
  if (const Expr *Callee = Node.getCallee())
    return InnerMatcher.matches(*Callee, Finder, Builder);
  return false;
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Delegates to hasDeclaration, which resolves the callee's declaration.
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// and public virtual X (matcher = cxxBaseSpecifier(hasType(
/// asString("class X")))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// class Z : public virtual X {};
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl, CXXBaseSpecifier),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // getUnderlyingType() yields a null QualType when the node has no
  // associated type; a null type never matches.
  const QualType QT = internal::getUnderlyingType(Node);
  return !QT.isNull() && InnerMatcher.matches(QT, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// and public virtual X (matcher = cxxBaseSpecifier(hasType(
/// cxxRecordDecl(hasName("X"))))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// class Z : public virtual X {};
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
/// class Base {};
/// class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
                                    CXXBaseSpecifier),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Resolve the node's type, then match the declaration behind that type;
  // a null type never matches.
  const QualType QT = internal::getUnderlyingType(Node);
  return !QT.isNull() &&
         qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicitly generated declarations (e.g. implicit destructors) carry no
  // type source information and therefore cannot match.
  const TypeSourceInfo *TSI = Node.getTypeSourceInfo();
  if (TSI == nullptr)
    return false;
  return Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Exact string comparison against the type's printed representation.
  return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Null or non-pointer types never match.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Wraps the declaration matcher and reuses the QualType overload.
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // Match against the fully desugared, unqualified form of this type.
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null or non-reference types never match.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Desugar to the canonical type before matching; null types never match.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Wraps the declaration matcher and reuses the QualType overload.
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(onImplicitObjectArgument(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Unlike `on`, the argument is matched as-is: no parentheses or implicit
  // casts are stripped.
  if (const Expr *Object = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*Object, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either the object type itself or a pointer to it, matched on
  // the unmodified implicit object argument.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Same as the QualType overload, but matches the type's declaration.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  // Match the declaration this reference resolves to, if any.
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found through a using-shadow declaration can match.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any declaration in its overload set
/// matches the given matcher.
///
/// Given
/// \code
///   template <typename T> void foo(T);
///   template <typename T> void bar(T);
///   template <typename T> void baz(T t) {
///     foo(t);
///     bar(t);
///   }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
///     functionTemplateDecl(hasName("foo"))))
///   matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  const auto Found = matchesFirstInPointerRange(
      InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder);
  return Found != Node.decls_end();
}
/// Matches the Decl of a DeclStmt that declares exactly one entity.
///
/// Given
/// \code
///   int a, b;
///   int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
///   matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration whose initializer expression
/// matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
///   bool y() { return true; }
///   bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // getAnyInitializer also finds an initializer attached to a different
  // redeclaration of the same variable.
  const Expr *Init = Node.getAnyInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  const bool IsStaticLocal = Node.isStaticLocal();
  return IsStaticLocal;
}
/// Matches a non-static local variable, i.e. one with function scope
/// and local storage.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  const bool LocalStorage = Node.hasLocalStorage();
  return LocalStorage;
}
/// Matches a variable declaration whose storage is not local.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  const bool GlobalStorage = Node.hasGlobalStorage();
  return GlobalStorage;
}
/// Matches a variable declaration with automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  return SD_Automatic == Node.getStorageDuration();
}
/// Matches a variable declaration with static storage duration.
/// This covers variables declared at namespace scope as well as those
/// declared with the "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
///   matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  return SD_Static == Node.getStorageDuration();
}
/// Matches a variable declaration with thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  return SD_Thread == Node.getStorageDuration();
}
/// Matches a variable declaration that is the exception variable of a
/// C++ catch block or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
///   try {
///   } catch (int x) {
///   }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  const bool IsCatchVariable = Node.isExceptionVariable();
  return IsCatchVariable;
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
///   void f(int x, int y);
///   f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          unsigned, N) {
  unsigned EffectiveArgs = Node.getNumArgs();
  if (Finder->isTraversalIgnoringImplicitNodes()) {
    // Trailing arguments filled in from defaults appear as
    // CXXDefaultArgExpr nodes; drop them when implicit nodes are ignored.
    while (EffectiveArgs > 0 &&
           isa<CXXDefaultArgExpr>(Node.getArg(EffectiveArgs - 1)))
      --EffectiveArgs;
  }
  return EffectiveArgs == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
///   void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(
                               CallExpr, CXXConstructExpr,
                               CXXUnresolvedConstructExpr, ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  if (N >= Node.getNumArgs())
    return false;
  const Expr *Argument = Node.getArg(N);
  // An absent default argument is an implicit node; skip it in traversal
  // modes that ignore implicit nodes.
  const bool IgnoreImplicit = Finder->isTraversalIgnoringImplicitNodes();
  if (IgnoreImplicit && isa<CXXDefaultArgExpr>(Argument))
    return false;
  return InnerMatcher.matches(*Argument->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
///     (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  if (N >= Node.getNumInits())
    return false;
  return InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that declare a specific number of
/// entities.
///
/// Example: Given
/// \code
///   int a, b;
///   int c;
///   int d = 2, e;
/// \endcode
/// declCountIs(2)
///   matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  const auto Count = std::distance(Node.decl_begin(), Node.decl_end());
  return Count == (ptrdiff_t)N;
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  const unsigned DeclCount = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= DeclCount)
    return false;
  DeclStmt::const_decl_iterator Target = Node.decl_begin();
  std::advance(Target, N);
  return InnerMatcher.matches(**Target, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // A catch-all handler has no exception declaration.
  return !Node.getExceptionDecl();
}
/// Matches a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
///   hasAnyConstructorInitializer(anything())
/// )))
///   record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  const auto Found = matchesFirstInPointerRange(
      InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder);
  if (Found == Node.init_end())
    return false;
  // In AsIs traversal every initializer counts; otherwise only ones that
  // were explicitly written by the programmer.
  return !Finder->isTraversalIgnoringImplicitNodes() || (*Found)->isWritten();
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     forField(hasName("foo_"))))))
///   matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  const FieldDecl *Member = Node.getAnyMember();
  if (Member == nullptr)
    return false;
  return InnerMatcher.matches(*Member, Finder, Builder);
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     withInitializer(integerLiteral(equals(1)))))))
///   matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Init = Node.getInit();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
///   struct Foo {
///     Foo() { }
///     Foo(int) : foo_("A") { }
///     string foo_;
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
///   will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  const bool ExplicitlyWritten = Node.isWritten();
  return ExplicitlyWritten;
}
/// Matches a constructor initializer that initializes a base class, as
/// opposed to a member.
///
/// Given
/// \code
///   struct B {};
///   struct D : B {
///     int I;
///     D(int i) : I(i) {}
///   };
///   struct E : B {
///     E() : B() {}
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
///   will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  const bool InitializesBase = Node.isBaseInitializer();
  return InitializesBase;
}
/// Matches a constructor initializer that initializes a member, as
/// opposed to a base class.
///
/// Given
/// \code
///   struct B {};
///   struct D : B {
///     int I;
///     D(int i) : I(i) {}
///   };
///   struct E : B {
///     E() : B() {}
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
///   will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  const bool InitializesMember = Node.isMemberInitializer();
  return InitializesMember;
}
/// Matches any argument of a call expression, a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
///   void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
///   matches x(1, y, 42)
/// with hasAnyArgument(...)
///   matching y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
///   void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
///   matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Argument : Node.arguments()) {
    // Once a default argument is reached, all remaining arguments are
    // defaults too; stop when implicit nodes are being ignored.
    if (isa<CXXDefaultArgExpr>(Argument) &&
        Finder->isTraversalIgnoringImplicitNodes())
      break;
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Argument, Finder, &Candidate)) {
      *Builder = std::move(Candidate);
      return true;
    }
  }
  return false;
}
/// Matches any variable capture of a lambda expression.
///
/// Given
/// \code
///   void foo() {
///     int x;
///     auto f = [x](){};
///   }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
///   matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  for (const LambdaCapture &Capture : Node.captures()) {
    // Only captures of variables carry a VarDecl to match against.
    if (!Capture.capturesVariable())
      continue;
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Candidate)) {
      *Builder = std::move(Candidate);
      return true;
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
///   struct foo {
///     void bar() {
///       auto f = [this](){};
///     }
///   }
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
///   matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  // The inner matcher is intentionally unused; any capture of 'this'
  // satisfies this overload.
  for (const LambdaCapture &Capture : Node.captures())
    if (Capture.capturesThis())
      return true;
  return false;
}
/// Matches a constructor call expression that uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  const bool ListInit = Node.isListInitialization();
  return ListInit;
}
/// Matches a constructor call expression that requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
///   struct point { double x; double y; };
///   point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  const bool ZeroInit = Node.requiresZeroInitialization();
  return ZeroInit;
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  if (N >= Node.parameters().size())
    return false;
  return InnerMatcher.matches(*Node.parameters()[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates one set of bindings per (argument, parameter) pair that
  // satisfies both matchers; all of them are installed into Builder at the
  // end so every pair produces its own match result.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // ParamIndex stays 0-based even when ArgIndex starts at 1, because for a
  // member operator call argument i corresponds to parameter i-1.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Re-match the whole call/construct expression to locate the callee's
      // declaration and test its ParamIndex-th parameter.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
///   void (*f_ptr)(int) = f;
///   f_ptr(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParamType(
///     declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
/// ))
///   matches f(y) and f_ptr(y)
/// with declRefExpr(...)
///   matching int y
/// and qualType(...)
///   matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  // Accumulates one set of bindings per (argument, parameter type) pair that
  // satisfies both matchers; all of them are installed into Builder at the
  // end so every pair produces its own match result.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // Try to recover a FunctionProtoType for calls through (member) function
  // pointers, where no ParmVarDecls are available.
  const FunctionProtoType *FProto = nullptr;
  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();
      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();
      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be member-pointer if its a memberfunctionpointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }
  int ParamIndex = 0;
  bool Matched = false;
  unsigned NumArgs = Node.getNumArgs();
  // For variadic prototypes only the declared parameters have a type to
  // match against, so clamp the argument range accordingly.
  if (FProto && FProto->isVariadic())
    NumArgs = std::min(NumArgs, FProto->getNumParams());
  for (; ArgIndex < NumArgs; ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder,
                           &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      if (FProto) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }
      // Fall back to locating the callee declaration and matching the type
      // of its ParamIndex-th parameter.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();
  // The parent may be a function, a block, or an ObjC method; each exposes
  // its parameters the same way.
  if (const auto *Function = dyn_cast_or_null<FunctionDecl>(Context))
    return N < Function->param_size() && Function->getParamDecl(N) == &Node;
  if (const auto *Block = dyn_cast_or_null<BlockDecl>(Context))
    return N < Block->param_size() && Block->getParamDecl(N) == &Node;
  if (const auto *Method = dyn_cast_or_null<ObjCMethodDecl>(Context))
    return N < Method->param_size() && Method->getParamDecl(N) == &Node;
  return false;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
///   class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
///   matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
///   matching int y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasAnyParameter
/// matching y.
///
/// For blocks, given
/// \code
///   b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasAnyParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  const auto Found = matchesFirstInPointerRange(
      InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder);
  return Found != Node.param_end();
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that declare a
/// specific number of parameters.
///
/// Given
/// \code
///   void f(int i) {}
///   void g(int i, int j) {}
///   void h(int i, int j);
///   void j(int i);
///   void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
///   matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  return N == Node.getNumParams();
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
///   void nope();
///   [[noreturn]] void a();
///   __attribute__((noreturn)) void b();
///   struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
///   matches all of those except
/// \code
///   void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) {
  const bool NoReturn = Node.isNoReturn();
  return NoReturn;
}
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
///   class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
///   matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType ReturnType = Node.getReturnType();
  return InnerMatcher.matches(ReturnType, Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
///   extern "C" void f() {}
///   extern "C" { void g() {} }
///   void h() {}
///   extern "C" int x = 1;
///   extern "C" int y = 2;
///   int z = 3;
/// \endcode
/// functionDecl(isExternC())
///   matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
///   matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  const bool ExternC = Node.isExternC();
  return ExternC;
}
/// Matches variable/function declarations that were written with the
/// "static" storage class specifier in the source.
///
/// Given:
/// \code
///   static void f() {}
///   static int i = 0;
///   extern int j;
///   int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
///   matches the function declaration f.
/// varDecl(isStaticStorageClass())
///   matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  return SC_Static == Node.getStorageClass();
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
///   void Func();
///   void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
///   matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  const bool Deleted = Node.isDeleted();
  return Deleted;
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
///   class A { ~A(); };
///   class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
///   matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  const bool Defaulted = Node.isDefaulted();
  return Defaulted;
}
/// Matches weak function declarations.
///
/// Given:
/// \code
///   void foo() __attribute__((__weakref__("__foo")));
///   void bar();
/// \endcode
/// functionDecl(isWeak())
///   matches the weak declaration "foo", but not "bar".
AST_MATCHER(FunctionDecl, isWeak) {
  const bool Weak = Node.isWeak();
  return Weak;
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
///   functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Without a prototype there is no exception specification at all.
  const FunctionProtoType *Proto = internal::getFunctionProtoType(Node);
  return Proto != nullptr && Proto->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *Proto = internal::getFunctionProtoType(Node);
  // A function without a prototype (and hence without any exception
  // specification) is treated as potentially throwing.
  if (!Proto)
    return false;
  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(Proto->getExceptionSpecType()))
    return true;
  return Proto->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  const bool Constexpr = Node.isConstexpr();
  return Constexpr;
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
///  void foo() {
///    if (int i = foobar(); i > 0) {}
///    switch (int i = foobar(); i) {}
///    for (auto& a = get_range(); auto& x : a) {}
///  }
///  void bar() {
///    if (foobar() > 0) {}
///    switch (foobar()) {}
///    for (auto& x : get_range()) {}
///  }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
///   matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
///   matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
///   matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *InitStmt = Node.getInit();
  if (InitStmt == nullptr)
    return false;
  return InnerMatcher.matches(*InitStmt, Finder, Builder);
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
///   if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Cond = Node.getCond();
  if (Cond == nullptr)
    return false;
  return InnerMatcher.matches(*Cond, Finder, Builder);
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *ThenBranch = Node.getThen();
  if (ThenBranch == nullptr)
    return false;
  return InnerMatcher.matches(*ThenBranch, Finder, Builder);
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *ElseBranch = Node.getElse();
  if (ElseBranch == nullptr)
    return false;
  return InnerMatcher.matches(*ElseBranch, Finder, Builder);
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
///   class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
///     has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
///     has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
///   matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
///     forEachDescendant(varDecl().bind("d")),
///     forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSepcifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Drop every binding set in which ID is NOT bound to this node; the
  // match succeeds as long as at least one binding set survives.
  internal::NotEqualsBoundNodePredicate Filter;
  Filter.ID = ID;
  Filter.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Filter);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
///   if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
///   matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  const DeclStmt *CondVarStmt = Node.getConditionVariableDeclStmt();
  if (CondVarStmt == nullptr)
    return false;
  return InnerMatcher.matches(*CondVarStmt, Finder, Builder);
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Index = Node.getIdx();
  return Index != nullptr && InnerMatcher.matches(*Index, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Base = Node.getBase();
  return Base != nullptr && InnerMatcher.matches(*Base, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body. Note that in case of functions
/// this matcher only matches the definition itself and not the other
/// declarations of the same function.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
///
/// Given
/// \code
///   void f();
///   void f() {}
/// \endcode
/// hasBody(functionDecl())
///   matches 'void f() {}'
/// with compoundStmt()
///   matching '{}'
///   but does not match 'void f();'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // Bodies of defaulted functions are compiler-generated; skip them in
  // traversal modes that ignore implicit nodes.
  if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node))
    return false;
  const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node);
  if (Body == nullptr)
    return false;
  return InnerMatcher.matches(*Body, Finder, Builder);
}
/// Matches a function declaration that has a given body present in the AST.
/// Note that this matcher matches all the declarations of a function whose
/// body is present in the AST.
///
/// Given
/// \code
///   void f();
///   void f() {}
///   void g();
/// \endcode
/// functionDecl(hasAnyBody(compoundStmt()))
///   matches both 'void f();'
///   and 'void f() {}'
///   with compoundStmt() matching '{}',
///   but does not match 'void g();'
AST_MATCHER_P(FunctionDecl, hasAnyBody,
              internal::Matcher<Stmt>, InnerMatcher) {
  if (const Stmt *Body = Node.getBody())
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
///   { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
///   matches '{ {}; 1+2; }'
///   with compoundStmt() matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const CompoundStmt *Compound = CompoundStmtMatcher<NodeType>::get(Node);
  if (!Compound)
    return false;
  return matchesFirstInPointerRange(InnerMatcher, Compound->body_begin(),
                                    Compound->body_end(), Finder,
                                    Builder) != Compound->body_end();
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
// Counts only the compound statement's direct children; statements nested
// inside them are not included.
return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcher<internal::ValueEqualsMatcher,
void(internal::AllNodeBaseTypes), ValueT>
equals(const ValueT &Value) {
return internal::PolymorphicMatcher<internal::ValueEqualsMatcher,
void(internal::AllNodeBaseTypes), ValueT>(
Value);
}
// NOTE(review): the fixed-type overloads below (ids 0-2) appear to exist so
// that callers which cannot use template argument deduction (e.g. the dynamic
// matcher registry) can invoke equals() with bool/unsigned/double -- confirm.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
bool, Value, 0) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
unsigned, Value, 1) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
FloatingLiteral,
IntegerLiteral),
double, Value, 2) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasOperatorName,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator, UnaryOperator),
    std::string, Name) {
  const Optional<StringRef> OpName = internal::getOpName(Node);
  return OpName && *OpName == Name;
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
// Declaration only (note the 'extern'): the variadic functor object is
// defined in the implementation file.
extern const internal::VariadicFunction<
internal::PolymorphicMatcher<internal::HasAnyOperatorNameMatcher,
AST_POLYMORPHIC_SUPPORTED_TYPES(
BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator, UnaryOperator),
std::vector<std::string>>,
StringRef, internal::hasAnyOperatorNameFunc>
hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(
isAssignmentOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator)) {
// Covers plain '=' as well as compound assignments such as '+=' (see the
// examples above).
return Node.isAssignmentOp();
}
/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
/// (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
/// struct S { bool operator<(const S& other); };
/// void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(
isComparisonOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator)) {
return Node.isComparisonOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *LHS = internal::getLHS(Node))
    return InnerMatcher.matches(*LHS, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *RHS = internal::getRHS(Node))
    return InnerMatcher.matches(*RHS, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
AST_POLYMORPHIC_MATCHER_P(
hasEitherOperand,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator),
internal::Matcher<Expr>, InnerMatcher) {
// Implemented in terms of hasLHS/hasRHS so operand extraction stays in one
// place.
return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1),
/// integerLiteral(equals(2)))
/// \code
/// 1 + 2 // Match
/// 2 + 1 // Match
/// 1 + 1 // No match
/// 2 + 2 // No match
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(
hasOperands,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator),
internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) {
// Tries both orderings: Matcher1 on the LHS with Matcher2 on the RHS, and
// vice versa.
return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
allOf(hasLHS(Matcher2), hasRHS(Matcher1))))
.matches(Node, Finder, Builder);
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasUnaryOperand,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(UnaryOperator,
                                                          CXXOperatorCallExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *SubExpr = internal::getSubExpr(Node))
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
///   class URL { URL(string); };
///   URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
///   int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Source =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  if (!Source)
    return false;
  return InnerMatcher.matches(*Source, Finder, Builder);
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
// Uses the type as written in the cast, not the semantic result type.
const QualType NodeType = Node.getTypeAsWritten();
return InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
// A TagDecl has a single tag kind, so these four spelling checks are
// mutually exclusive.
return Node.isStruct();
}
/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
return Node.isUnion();
}
/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
return Node.isClass();
}
/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
return Node.isEnum();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
ObjCMethodDecl,
FunctionDecl)) {
// Redeclaration-specific: only the redeclaration that is the definition
// matches, not other declarations of the same entity.
return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
// Only an ellipsis parameter counts; template parameter packs do not (see
// the example function h above).
return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
///     (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
///         ofClass(hasName("A"))))))
/// \code
///   class A {
///    public:
///     A();
///   };
///   A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // Allow the inner matcher to see children not spelled in source while
  // matching the parent record.
  ASTChildrenNotSpelledInSourceScope RAII(Finder, false);
  if (const CXXRecordDecl *Parent = Node.getParent())
    return InnerMatcher.matches(*Parent, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
internal::Matcher<CXXMethodDecl>, InnerMatcher) {
// Accumulates the bindings of every overridden method that matches; on
// return the caller's builder holds the union of all successful matches.
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *Overridden : Node.overridden_methods()) {
// Match against a private copy of the builder so a failed candidate cannot
// pollute the accumulated result.
BoundNodesTreeBuilder OverriddenBuilder(*Builder);
const bool OverriddenMatched =
InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
if (OverriddenMatched) {
Matched = true;
Result.addMatch(OverriddenBuilder);
}
}
*Builder = std::move(Result);
return Matched;
}
/// Matches declarations of virtual methods and C++ base specifers that specify
/// virtual inheritance.
///
/// Example:
/// \code
/// class A {
/// public:
/// virtual void x(); // matches x
/// };
/// \endcode
///
/// Example:
/// \code
/// class Base {};
/// class DirectlyDerived : virtual Base {}; // matches Base
/// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
CXXBaseSpecifier)) {
return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
// Unlike isVirtual(), this does not match methods that are only implicitly
// virtual through overriding (see B::x above).
return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
CXXMethodDecl)) {
// 'template' keyword is required because hasAttr is a dependent member in
// this polymorphic context.
return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
// True either for an actual overrider or for a method carrying an explicit
// 'override' attribute.
return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr)) {
return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
// Includes Objective-C object pointers, which are a distinct type in the
// AST (see the doc comment above).
return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
// Qualifiers hidden behind a typedef are not "local" (see \c i above).
return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
internal::Matcher<ValueDecl>, InnerMatcher) {
return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
/// cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
hasObjectExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr),
internal::Matcher<Expr>, InnerMatcher) {
// Dependent/unresolved member accesses through an implicit 'this' carry no
// base expression to match against, so they can never match.
if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  const auto ShadowEnd = Node.shadow_end();
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    ShadowEnd, Finder, Builder) != ShadowEnd;
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  const NamedDecl &Target = *Node.getTargetDecl();
  return InnerMatcher.matches(Target, Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
///
/// But given
/// \code
///   template <typename T> class X {}; class A {};
///   template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Any kind of instantiation counts; explicit specializations and
  // non-template declarations do not.
  switch (Node.getTemplateSpecializationKind()) {
  case TSK_ImplicitInstantiation:
  case TSK_ExplicitInstantiationDefinition:
  case TSK_ExplicitInstantiationDeclaration:
    return true;
  default:
    return false;
  }
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { T i; }
///   A(0);
///   A(0U);
/// \endcode
/// functionDecl(isInstantiated())
///   matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration qualifies if it is itself an instantiation or has an
  // instantiation among its ancestors.
  auto InstantiationDecl =
      decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                 functionDecl(isTemplateInstantiation())));
  return decl(anyOf(InstantiationDecl, hasAncestor(InstantiationDecl)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
///   int j;
///   template<typename T> void A(T t) { T i; j += 42;}
///   A(0);
///   A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
///   matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
///   will NOT match j += 42; as it's shared between the template definition
///   and instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // A statement qualifies only via its ancestry: some enclosing record or
  // function must be a template instantiation.
  auto InstantiationDecl =
      decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                 functionDecl(isTemplateInstantiation())));
  return stmt(hasAncestor(InstantiationDecl));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
CXXRecordDecl)) {
// Complement of isTemplateInstantiation(): only user-written
// specializations match.
return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
internal::Matcher<QualType>, InnerMatcher, 0) {
// Adapts a QualType matcher so it can be applied to TypeLoc nodes.
return internal::BindableMatcher<TypeLoc>(
new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
return Node.isVoidType();
}
// Convenience alias used below to declare Matcher<Type> factory objects such
// as builtinType, arrayType, etc.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
// Declaration only (note the 'extern'); the matcher objects are defined in
// the implementation file.
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
// "Real" excludes _Complex; see Type::isRealFloatingType().
return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void f() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
StringLiteral),
unsigned, N) {
// Dispatches per node kind via HasSizeMatcher: the array element count for
// ConstantArrayType, the string length for StringLiteral.
return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 };
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // The size expression is dereferenced unconditionally — a VariableArrayType
  // is assumed to always carry one.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; }
/// A::* ptr = A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Elaborated types without a qualifier never match.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  return Qualifier && InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Runs the inner matcher on the type the elaborated type names.
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  // Declarations without a declaration context never match.
  if (const DeclContext *DC = Node.getDeclContext())
    return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder,
                                Builder);
  return false;
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapts a NestedNameSpecifier matcher so it can be applied to the
  // corresponding source-location (Loc) node.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type specifiers can match; namespace specifiers have no type.
  const Type *SpecifiedType = Node.getAsType();
  if (!SpecifiedType)
    return false;
  // Match without qualifiers, as documented above.
  return InnerMatcher.matches(QualType(SpecifiedType, 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Guard against an invalid loc and a non-type specifier before running the
  // inner TypeLoc matcher.
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A"))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // The outermost specifier has no prefix and therefore never matches.
  if (const NestedNameSpecifier *Prefix = Node.getPrefix())
    return InnerMatcher.matches(*Prefix, Finder, Builder);
  return false;
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // An empty (invalid) prefix loc never matches.
  NestedNameSpecifierLoc Prefix = Node.getPrefix();
  return Prefix && InnerMatcher.matches(Prefix, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Only namespace specifiers can match; type specifiers yield null here.
  const NamespaceDecl *Namespace = Node.getAsNamespace();
  return Namespace && InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Decl has pointer identity in the AST, so address comparison suffices.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  // Stmt has pointer identity in the AST, so address comparison suffices.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  // Type has pointer identity in the AST, so address comparison suffices.
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  // Walk the intrusive list of cases; collect the bindings of every case the
  // inner matcher accepts, producing one result per matching case.
  for (const SwitchCase *Case = Node.getSwitchCaseList(); Case;
       Case = Case->getNextSwitchCase()) {
    BoundNodesTreeBuilder CaseBuilder(*Builder);
    if (InnerMatcher.matches(*Case, Finder, &CaseBuilder)) {
      Matched = true;
      Result.addMatch(CaseBuilder);
    }
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Init : Node.inits()) {
    // In implicit-ignoring traversal modes, unwritten initializers are
    // skipped.
    if (Finder->isTraversalIgnoringImplicitNodes() && !Init->isWritten())
      continue;
    BoundNodesTreeBuilder InitBuilder(*Builder);
    if (!InnerMatcher.matches(*Init, Finder, &InitBuilder))
      continue;
    Matched = true;
    Result.addMatch(InitBuilder);
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  // Thin wrapper over CXXConstructorDecl::isCopyConstructor().
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  // Thin wrapper over CXXConstructorDecl::isMoveConstructor().
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  // Thin wrapper over CXXConstructorDecl::isDefaultConstructor().
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  // Thin wrapper over CXXConstructorDecl::isDelegatingConstructor().
  return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
                            CXXConstructorDecl, CXXConversionDecl,
                            CXXDeductionGuideDecl)) {
  // Matches only when the explicit specifier resolves to true — per the
  // examples above, a dependent explicit(b) does not match.
  return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
  // Only declarations carrying an explicit(expr) specifier can match.
  if (!ES.getExpr())
    return false;
  // NOTE(review): presumably this RAII scope lets the inner matcher visit
  // nodes not spelled in the source while it runs — confirm against
  // ASTChildrenNotSpelledInSourceScope.
  ASTChildrenNotSpelledInSourceScope RAII(Finder, false);
  return InnerMatcher.matches(*ES.getExpr(), Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // This is required because the spelling of the function used to determine
  // whether inline is specified or not differs between the polymorphic types.
  if (const auto *Func = dyn_cast<FunctionDecl>(&Node))
    return Func->isInlineSpecified();
  if (const auto *Namespace = dyn_cast<NamespaceDecl>(&Node))
    return Namespace->isInline();
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  // Thin wrapper over NamespaceDecl::isAnonymousNamespace().
  return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } // Per the example above, inline namespaces inside std count.
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A GNU case range ("case 3 ... 4") carries an RHS and never matches; for a
  // plain case label, match the inner matcher against its constant (the LHS).
  return !Node.getRHS() &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // True if any attribute attached to the declaration has the requested kind.
  return llvm::any_of(Node.attrs(), [&](const auto *Attribute) {
    return Attribute->getKind() == AttrKind;
  });
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare "return;" has no value expression and never matches.
  const auto *RetValue = Node.getRetValue();
  return RetValue && InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) {
  return anyOf(
      // GNU __null and C++11 nullptr.
      gnuNullExpr(), cxxNullPtrLiteralExpr(),
      // A literal 0 counts only when its parent expression has pointer type,
      // so a plain "int i = 0" is not matched (see the examples above).
      integerLiteral(equals(0), hasParent(expr(hasType(pointerType())))));
}
/// Matches the DecompositionDecl the binding belongs to.
///
/// For example, in:
/// \code
/// void foo()
/// {
/// int arr[3];
/// auto &[f, s, t] = arr;
///
/// f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
/// bindingDecl(hasName("f"),
/// forDecomposition(decompositionDecl())
/// \endcode
/// matches 'f' in 'auto &[f, s, t]'.
AST_MATCHER_P(BindingDecl, forDecomposition, internal::Matcher<ValueDecl>,
              InnerMatcher) {
  // Bindings that do not belong to a decomposition never match.
  const ValueDecl *Decomposed = Node.getDecomposedDecl();
  return Decomposed && InnerMatcher.matches(*Decomposed, Finder, Builder);
}
/// Matches the Nth binding of a DecompositionDecl.
///
/// For example, in:
/// \code
/// void foo()
/// {
/// int arr[3];
/// auto &[f, s, t] = arr;
///
/// f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
/// decompositionDecl(hasBinding(0,
/// bindingDecl(hasName("f").bind("fBinding"))))
/// \endcode
/// matches the decomposition decl with 'f' bound to "fBinding".
AST_MATCHER_P2(DecompositionDecl, hasBinding, unsigned, N,
               internal::Matcher<BindingDecl>, InnerMatcher) {
  // An out-of-range index simply fails to match.
  auto Bindings = Node.bindings();
  if (N >= Bindings.size())
    return false;
  return InnerMatcher.matches(*Bindings[N], Finder, Builder);
}
/// Matches any binding of a DecompositionDecl.
///
/// For example, in:
/// \code
/// void foo()
/// {
/// int arr[3];
/// auto &[f, s, t] = arr;
///
/// f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
/// decompositionDecl(hasAnyBinding(bindingDecl(hasName("f").bind("fBinding"))))
/// \endcode
/// matches the decomposition decl with 'f' bound to "fBinding".
AST_MATCHER_P(DecompositionDecl, hasAnyBinding, internal::Matcher<BindingDecl>,
              InnerMatcher) {
  // True if the inner matcher accepts at least one of the decomposition's
  // bindings.
  return llvm::any_of(Node.bindings(), [&](const auto *Binding) {
    return InnerMatcher.matches(*Binding, Finder, Builder);
  });
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Depth-first walk up the AST parent map until a FunctionDecl (or a
  // LambdaExpr, whose call operator stands in for it) is found.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // Statements inside a lambda body belong to the lambda's call operator,
      // not to the function lexically enclosing the lambda.
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      // Not a function boundary: keep climbing. A node can have several
      // parents in the parent map, so push them all.
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Formal linkage only: per the example above, a function in an anonymous
  // namespace still reports external *formal* linkage.
  return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Thin wrapper over ParmVarDecl::hasDefaultArg(); deprecated in favor of
  // hasInitializer() (see above).
  return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // Thin wrapper over CXXNewExpr::isArray().
  return Node.isArray();
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // An index beyond the number of placement arguments simply fails to match.
  if (Index >= Node.getNumPlacementArgs())
    return false;
  return InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
///   MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
///   matches the expression 'new (Storage) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  // True if the inner matcher accepts at least one placement argument.
  return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
    return InnerMatcher.matches(*Arg, Finder, Builder);
  });
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
///   matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // NOTE(review): *Node.getArraySize() unwraps the size expression and is
  // tested for null before matching — presumably getArraySize() can yield a
  // null expression for some array-new forms; confirm against CXXNewExpr docs.
  return Node.isArray() && *Node.getArraySize() &&
         InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // Forward declarations (like 'class y;' above) have no definition and do
  // not match.
  return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // Unscoped enums (like 'enum X' above) do not match.
  return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Trailing-return information lives on FunctionProtoType; function types
  // without a prototype can never match.
  if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
    return F->hasTrailingReturn();
  return false;
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      // Skip the elidable constructor call and its MaterializeTemporaryExpr
      // wrapper; match on the underlying expression instead.
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  // No elidable-constructor wrapper found: match on the node itself.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective())`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  return Node.isStandaloneDirective();
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Guard before calling getStructuredBlock(), which requires a structured
  // block to exist.
  if (Node.isStandaloneDirective())
    return false; // Standalone directives have no structured blocks.
  return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  ArrayRef<OMPClause *> Clauses = Node.clauses();
  // matchesFirstInPointerRange returns the end iterator when no clause
  // matched InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
                                    Clauses.end(), Finder,
                                    Builder) != Clauses.end();
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
/// ``default(firstprivate)``
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  // Compare the clause's kind against the 'none' enumerator.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  // Compare the clause's kind against the 'shared' enumerator.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
/// ``default(firstprivate)``.
AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
  // Compare the clause's kind against the 'firstprivate' enumerator.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))``
/// matches ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // The answer depends on the active OpenMP version, taken from the LangOpts.
  return llvm::omp::isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
pair_dist.c | /* -*- mode: c; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*********************************************************************
* Clustal Omega - Multiple sequence alignment
*
* Copyright (C) 2010 University College Dublin
*
* Clustal-Omega is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This file is part of Clustal-Omega.
*
********************************************************************/
/*
* RCS $Id: pair_dist.c 301 2016-06-13 13:32:55Z fabian $
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdlib.h>
#include <ctype.h>
#include <assert.h>
#include <time.h>
/* only neededfor iNumberOfThreads */
#include "clustal-omega.h"
#include "ktuple_pair.h"
#include "pair_dist.h"
#include "progress.h"
#include "util.h"
/* Made iend/jend const unsigned long int (originally just int), FS, 2016-04-04
*/
/* Up to rev 173 we had a USE_SYM_KTUPLE switch implemented here. When active
* ktuple distances were computed twice for each pair and averaged. Idea was
* to avoid assymmetries in the pairwise scores (score(a, b) is often not the
* same as score(b, a)). Results on BAliBASE indicate that this is overkill:
*
* r92_default core columns: avg-sp=0.800656 avg-tc=0.47711 (of total 218)
* r93-mod--norm-ktuple/ core columns: avg-sp=0.800656 avg-tc=0.47711 (of total 218)
* r93-mod--sym-ktuple/ core columns: avg-sp=0.801083 avg-tc=0.476544 (of total 217)
* r93-mod--rand-ktuple-1 core columns: avg-sp=0.799289 avg-tc=0.468028 (of total 218)
* r93-mod--rand-ktuple-2 core columns: avg-sp=0.801654 avg-tc=0.47659 (of total 217)
* r93-mod--rand-ktuple-3 core columns: avg-sp=0.800234 avg-tc=0.474908 (of total 218)
* r93-mod--rand-ktuple-4 core columns: avg-sp=0.800573 avg-tc=0.476514 (of total 218)
* r93-mod--rand-ktuple-5 core columns: avg-sp=0.799679 avg-tc=0.468716 (of total 218)
*
*/
static double
KimuraCorrection(double frac_id);
static int
SquidIdPairDist(symmatrix_t *tmat, mseq_t *mseq,
int istart, const unsigned long int iend,
int jstart, const unsigned long int jend,
bool use_KimuraCorrection, progress_t *prProgress,
unsigned long int *ulStepNo, unsigned long int ulTotalStepNo);
/* Taken from Muscle's msadistkimura.cpp */
static int DAYHOFF_PAMS[]={
195, /* 75.0% observed d; 195 PAMs estimated = 195% estimated d */
196, /* 75.1% observed d; 196 PAMs estimated */
197, 198, 199, 200, 200, 201, 202, 203,
204, 205, 206, 207, 208, 209, 209, 210, 211, 212,
213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
223, 224, 226, 227, 228, 229, 230, 231, 232, 233,
234, 236, 237, 238, 239, 240, 241, 243, 244, 245,
246, 248, 249, 250, /* 250 PAMs = 80.3% observed d */
252, 253, 254, 255, 257, 258,
260, 261, 262, 264, 265, 267, 268, 270, 271, 273,
274, 276, 277, 279, 281, 282, 284, 285, 287, 289,
291, 292, 294, 296, 298, 299, 301, 303, 305, 307,
309, 311, 313, 315, 317, 319, 321, 323, 325, 328,
330, 332, 335, 337, 339, 342, 344, 347, 349, 352,
354, 357, 360, 362, 365, 368, 371, 374, 377, 380,
383, 386, 389, 393, 396, 399, 403, 407, 410, 414,
418, 422, 426, 430, 434, 438, 442, 447, 451, 456,
461, 466, 471, 476, 482, 487, 493, 498, 504, 511,
517, 524, 531, 538, 545, 553, 560, 569, 577, 586,
595, 605, 615, 626, 637, 649, 661, 675, 688, 703,
719, 736, 754, 775, 796, 819, 845, 874, 907, 945,
/* 92.9% observed; 945 PAMs */
988 /* 93.0% observed; 988 PAMs */
};
static int DAYHOFF_TABLE_ENTRIES = sizeof(DAYHOFF_PAMS)/sizeof(DAYHOFF_PAMS[0]);
/**
 *
 * @brief Compute Kimura corrected distance.
 *
 * Original Muscle documentation following:
 * """
 * This is defined to be:
 *     log_e(1 - p - p*p/5)
 * where p is the fraction of residues that differ, i.e.:
 *     p = (1 - fractional_conservation)
 * This measure is infinite for p = 0.8541 and is considered
 * unreliable for p >= 0.75 (according to the ClustalW docs).
 * ClustalW uses a table lookup for values > 0.75. The following table
 * was copied from the ClustalW file dayhoff.h.
 * """
 *
 * @note copied from Muscle's msadistkimura.cpp:KimuraDist()
 *
 * @warning For protein only (uses Dayhoff substitution parameters)
 *
 * @param[in] p
 * distance, e.g. 1.0 - fractional/relative identity
 *
 * @return The Kimura corrected distance
 *
 */
static double
KimuraCorrection(double p)
{
    int table_index;

    /* Typical case: use Kimura's empirical formula */
    if (p < 0.75)
        return -log(1 - p - (p*p)/5);

    /* Per ClustalW, return 10.0 for anything over 93% */
    if (p > 0.93)
        return 10.0;

    /* If 0.75 <= p <= 0.93, use table lookup.  Index is p beyond 0.75 in
     * steps of 0.001, rounded to nearest. */
    table_index = (int) ((p - 0.75)*1000 + 0.5);
    if (table_index < 0 || table_index >= DAYHOFF_TABLE_ENTRIES)
        Log(&rLog, LOG_FATAL, "Internal error in %s:%s", __FILE__, __FUNCTION__);

    /* Table stores PAMs*100; scale back to a distance */
    return DAYHOFF_PAMS[table_index] / 100.0;
}
/*** end: KimuraCorrection() ***/
/**
 * @brief Compute distances between all aligned sequence pairs using
 * squid's PairwiseIdentity, which is: idents / MIN(len1, len2)
 *
 * @param[out] tmat
 * Where to store the computed distances
 * @param[in] mseq
 * The aligned sequences
 * @param[in] istart
 * For distances [i][j] i>=istart, i<j
 * @param[in] iend
 * For distances [i][j] i<iend, i<j
 * @param[in] jstart
 * For distances [i][j] j>=jstart, i<j
 * @param[in] jend
 * For distances [i][j] i<j<jend, i<j
 * @param[in] use_kimura
 * Use Kimura corrected values (Proteins only)
 * @param[in] prProgress
 * Caller-owned progress display, shared by all threads working on
 * chunks of the matrix (see PairDistances)
 * @param[in,out] ulStepNo
 * Caller-owned shared step counter for progress reporting
 * @param[in] ulTotalStepNo
 * Estimated total number of steps, for the progress display
 *
 * @return Non-zero on error
 *
 */
int
SquidIdPairDist(symmatrix_t *tmat, mseq_t *mseq,
                int istart, const unsigned long int iend,
                int jstart, const unsigned long int jend,
                bool use_kimura, progress_t *prProgress,
                unsigned long int *ulStepNo, unsigned long int ulTotalStepNo)
{
    int i, j; /* aux */

    assert(NULL != tmat);
    assert(NULL != mseq);
    assert(NULL != prProgress);

    if (TRUE != mseq->aligned) {
        Log(&rLog, LOG_ERROR, "Sequences need to be aligned (%s)", __FUNCTION__);
        return -1;
    }
    if (SEQTYPE_PROTEIN != mseq->seqtype && TRUE == use_kimura) {
        Log(&rLog, LOG_WARN, "Using Kimura distance corretion which includes Dayhoff substitution table lookup for non-protein sequences");
    }

    /* Bugfix: a leftover NewProgress() call used to overwrite the
     * caller-supplied prProgress here.  The progress object was "moved from
     * SquidIdPairDist so progress bar works multithreaded" (see
     * PairDistances), so creating another one per chunk/thread leaked it
     * and printed duplicate progress headers; the call has been removed. */

    for (i=istart; i<iend; ++i) {
        /* by definition a sequence compared to itself should give a
           score of 0 */
        SymMatrixSetValue(tmat, i, i, 0.0);
#ifdef HAVE_OPENMP
        #pragma omp critical(squidid)
#endif
        {
            ProgressLog(prProgress, *ulStepNo, ulTotalStepNo, FALSE);
        }

        for (j=MAX(i+1, jstart); j<jend; ++j) {
            float dist;
            dist = 1.0 - PairwiseIdentity(mseq->seq[i], mseq->seq[j]);
#ifdef HAVE_OPENMP
            #pragma omp atomic
#endif
            (*ulStepNo)++;

            if (use_kimura) {
                dist = KimuraCorrection(dist);
            }
            SymMatrixSetValue(tmat, i, j, dist);
#ifdef HAVE_OPENMP
            #pragma omp critical(squidid)
#endif
            {
                Log(&rLog, LOG_DEBUG, "Aligned distance for sequence pair %d:%d= %lg",
                    i+1, j+1, dist);
            }
        }
    }

    return 0;
}
/*** end: SquidIdPairDist() ***/
/**
 * @brief compute or read precomputed distances for given sequences
 *
 * @param[out] distmat
 * Distances will be written to this matrix. Will be allocated here as
 * well. Caller must free with FreeSymMatrix()
 * @param[in] mseq
 * Distances will be computed for these sequences
 * @param[in] pairdist_type
 * Type of pairwise distance comparison
 * @param[in] bPercID
 * Forwarded to SymMatrixPrint() when writing fdist_out (presumably
 * "print percent identities instead of distances" -- confirm there)
 * @param[in] fdist_in
 * If not NULL, distances will be read from this file instead of
 * being computed
 * @param[in] istart
 * Compute distances for sequences i:j, i>=istart, i<j.
 * Usually 0.
 * @param[in] iend
 * Compute distances for sequences i:j, i<iend, i<j
 * Usually mseq->nseqs.
 * @param[in] jstart
 * Compute distances for sequences i:j, j>=jstart, i<j
 * Usually 0.
 * @param[in] jend
 * Compute distances for sequences i:j, j<jend, i<j
 * Usually mseq->nseqs.
 * @param[in] fdist_out
 * If not NULL, distances will be written to this file
 *
 * @return Non-zero on error
 *
 */
int
PairDistances(symmatrix_t **distmat, mseq_t *mseq, int pairdist_type, bool bPercID,
              int istart, const unsigned long int iend,
              int jstart, const unsigned long int jend,
              char *fdist_in, char *fdist_out)
{
    int uSeqIndex;
    unsigned long int ulStepNo = 0, ulTotalStepNo; /* DD: moved from SquidIdPairDist so progress bar works multithreaded */
    int iChunk, iChunkStart, iChunkEnd;
    int iChunkStarts[iNumberOfThreads];
    int iChunkEnds[iNumberOfThreads];
    progress_t *prProgress = NULL;
    int iSquidSuccess = 0;
    bool bPrintCR = (rLog.iLogLevelEnabled<=LOG_VERBOSE) ? FALSE : TRUE;

    assert(NULL!=distmat);
    assert(NULL!=mseq);
    assert(istart<iend);
    assert(jstart<jend);

    /* compute pairwise distances or read from file
     *
     */
#if 0
#include "random-dist.h"
#else
    if (NULL != fdist_in) {
        Log(&rLog, LOG_WARN,
            "Please use distance matrix input only, if you know exactly what you're doing!");
        if (SymMatrixRead(fdist_in, distmat, mseq)) {
            Log(&rLog, LOG_FATAL, "%s", "Reading distance matrix failed");
        }

    } else {
        if (NewSymMatrix(distmat, iend, jend)!=0) {
            Log(&rLog, LOG_FATAL, "%s", "Memory allocation for distance matrix failed");
        }

        /* break into chunks, one for each thread
           matrix is a triangle, not a square
           hence making even chunk sizes is slightly fiddlier
        */
        /* estimation of total number of steps (if istart and jstart are
         * both 0) */
        ulTotalStepNo = iend*jend - iend*iend/2 + iend/2;

        /* FIXME: can get rid of iChunkStart, iChunkEnd now that we're using the arrays */
        iChunkStart = iend;
        /* Bugfix: the loop condition used to be "iChunk <= iNumberOfThreads",
         * whose final iteration wrote one element past the end of
         * iChunkStarts[]/iChunkEnds[] (both sized iNumberOfThreads).  Only
         * entries 0..iNumberOfThreads-1 are ever read below, and those are
         * computed identically, so "<" is the correct bound. */
        for(iChunk = 0; iChunk < iNumberOfThreads; iChunk++)
        {
            iChunkEnd = iChunkStart;
            if (iChunk == iNumberOfThreads - 1){
                /* last chunk covers the remaining rows down to 0 */
                iChunkStart = 0;
            }
            else if (iend == jend){
                /* triangular region: sqrt spacing gives chunks with roughly
                 * equal numbers of cells */
                iChunkStart = iend - ((double)(iend - istart) * sqrt(((double)iChunk + 1.0)/(double)iNumberOfThreads));
            }
            else {
                /* rectangular region: linear spacing */
                iChunkStart = iend - (iend - istart) * (iChunk + 1) / (double)(iNumberOfThreads);
            }
            iChunkStarts[iChunk] = iChunkStart;
            iChunkEnds[iChunk] = iChunkEnd;
        }

        if (PAIRDIST_KTUPLE == pairdist_type) {
            Log(&rLog, LOG_INFO, "Calculating pairwise ktuple-distances...");
            NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO),
                        "Ktuple-distance calculation progress", bPrintCR);
#ifdef HAVE_OPENMP
            #pragma omp parallel for private(iChunk) schedule(dynamic)
#endif
            for(iChunk = 0; iChunk < iNumberOfThreads; iChunk++)
            {
                KTuplePairDist((*distmat), mseq, iChunkStarts[iChunk],
                               iChunkEnds[iChunk], jstart, jend, NULL, prProgress,
                               &ulStepNo, ulTotalStepNo);
            }

        } else if (PAIRDIST_SQUIDID == pairdist_type) {
            Log(&rLog, LOG_INFO, "Calculating pairwise aligned identity distances...");
            NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO),
                        "Pairwise identity calculation progress", bPrintCR);
#ifdef HAVE_OPENMP
            #pragma omp parallel for private(iChunk) schedule(dynamic)
#endif
            for(iChunk = 0; iChunk < iNumberOfThreads; iChunk++)
            {
                iSquidSuccess = SquidIdPairDist((*distmat), mseq,
                                                iChunkStarts[iChunk], iChunkEnds[iChunk],
                                                jstart, jend, FALSE, prProgress,
                                                &ulStepNo, ulTotalStepNo);
            }
            if(iSquidSuccess != 0)
                return -1;

        } else if (PAIRDIST_SQUIDID_KIMURA == pairdist_type) {
            Log(&rLog, LOG_INFO, "Calculating Kimura-corrected pairwise aligned identity distances...");
            NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO),
                        "Pairwise identity calculation progress", bPrintCR);
#ifdef HAVE_OPENMP
            #pragma omp parallel for private(iChunk) schedule(dynamic)
#endif
            for(iChunk = 0; iChunk < iNumberOfThreads; iChunk++)
            {
                iSquidSuccess = SquidIdPairDist((*distmat), mseq,
                                                iChunkStarts[iChunk], iChunkEnds[iChunk],
                                                jstart, jend, TRUE, prProgress,
                                                &ulStepNo, ulTotalStepNo);
            }
            if(iSquidSuccess != 0)
                return -1;

        } else {
            Log(&rLog, LOG_FATAL, "INTERNAL ERROR: don't know about pairdist_type %d",
                pairdist_type);
        }
    }
#endif /* random/proper distance calculation */

    /* optional printing of matrix to file
     */
    if (NULL != fdist_out) {
        /* need a copy of sequence names for printing */
        char **names;
        names = (char **)CKMALLOC(mseq->nseqs * sizeof(char*));
        for (uSeqIndex=0; uSeqIndex<mseq->nseqs; uSeqIndex++) {
            names[uSeqIndex] = mseq->sqinfo[uSeqIndex].name;
        }
        SymMatrixPrint((*distmat), names, fdist_out, bPercID);
        Log(&rLog, LOG_INFO, "Pairwise distance matrix written to %s",
            fdist_out);
        CKFREE(names);
    }

#if 0
#include "distance-distrib.h"
#endif

    if (NULL != prProgress) {
        ProgressDone(prProgress);
        FreeProgress(&prProgress);
    }

    return 0;
}
/*** end: PairDistances() ***/
|
GB_unaryop__lnot_fp32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_uint64
// op(A') function: GB_tran__lnot_fp32_uint64
// C type: float
// A type: uint64_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Computes Cx [p] = !(((float) Ax [p]) != 0) for p = 0..anz-1, i.e. the
// logical NOT of each uint64_t entry after typecast to float (see the
// GB_CASTING / GB_OP macros above).  Aliasing Cx == Ax is safe: each entry
// is read once and then written.
GrB_Info GB_unop__lnot_fp32_uint64
(
    float *Cx,              // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // This kernel was compiled out via GxB_NO_* flags (GB_DISABLE above);
    // the caller falls back to the generic implementation.
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Entries are independent, so a static OpenMP schedule suffices.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Applies the same cast-and-logical-NOT operator while transposing A into C.
// The actual loop lives in the GB_unaryop_transpose.c template, which is
// specialized through the GB_* macros defined earlier in this file.
GrB_Info GB_tran__lnot_fp32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // Kernel compiled out via GxB_NO_* flags; caller uses the generic case.
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
multiple_compilation_unit_a.c | #include <stdio.h>
#include "multiple_compilation_unit.h"
#define N 5
#define M 10
/* Entry point: fills an N x M table in parallel, one row per iteration,
 * then prints it row by row.  Demonstrates combining an OpenMP parallel
 * loop with a SMECY mapping pragma. */
int main() {
    int tab[N][M];
    /* The schedule(static, 1) enforces each iteration to be executed in a
       different thread, whatever the number of CPU is: */
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < N; i++) {
        /* Map this iteration onto processing element i; arg 1 (the row of
           tab) is declared as output, arg 2 (M) as input.  NOTE: the pragma
           references the loop variable i, so its name is part of the
           pragma's meaning.  init() is declared in
           multiple_compilation_unit.h -- presumably it fills the row with
           M values; confirm in the companion compilation unit. */
        #pragma smecy map(PE,i) \
            arg(1,out,[N][M],/[i][]) \
            arg(2,in)
        init(&tab[i][0], M);
    }
    /* Print the table to verify the result. */
    for (int i = 0; i < N; i++) {
        printf("Line %d :", i);
        for (int j = 0; j < M; j++)
            printf(" %d", tab[i][j]);
        puts("");
    }
    return 0;
}
|
fftw.h | #ifndef __FFTW_H__
#define __FFTW_H__
#include <omp.h>
#include <cassert>
#include <iostream>
#include "types.h"
#include "index.h"
#include <fftw3.h>
namespace Impl {
struct FFT {
int nx1_, nx2_, nb_batches_;
int nx1h_, nx2h_;
fftw_plan forward_c2c_plan_, forward_r2c_plan_;
fftw_plan backward_c2c_plan_, backward_c2r_plan_;
// Thread private buffer
complex64 *dptr_buffer_c_;
complex64 *thread_private_buffers_nx1h_, *thread_private_buffers_nx2_;
complex64 *thread_private_buffers_nx2_out_;
float64 *thread_private_buffers_nx1_r2c_;
float64 *thread_private_buffers_nx1_c2r_;
complex_view_3d d_buffer_c_;
complex_view_2d d_thread_private_buffers_nx1h_, d_thread_private_buffers_nx2_;
complex_view_2d d_thread_private_buffers_nx2_out_;
view_2d d_buffers_nx1_r2c_, d_buffers_nx1_c2r_;
// Single-transform constructor: sets up FFTW plans and scratch buffers for
// one nx1 x nx2 transform via init().
FFT(int nx1, int nx2)
  : nx1_(nx1), nx2_(nx2), nb_batches_(1) {
  init();
}

// Batched constructor: same setup for `batch` independent nx1 x nx2
// transforms processed together.
FFT(int nx1, int nx2, int batch)
  : nx1_(nx1), nx2_(nx2), nb_batches_(batch) {
  init();
}
// Destroys the four FFTW plans and releases every buffer allocated in
// init().  Virtual so the class can be used as a base.
virtual ~FFT() {
  fftw_destroy_plan(forward_c2c_plan_);
  fftw_destroy_plan(backward_c2c_plan_);
  fftw_destroy_plan(forward_r2c_plan_);
  fftw_destroy_plan(backward_c2r_plan_);
  deallocate(d_buffer_c_);
  deallocate(d_thread_private_buffers_nx1h_);
  deallocate(d_thread_private_buffers_nx2_);
  deallocate(d_thread_private_buffers_nx2_out_);
  deallocate(d_buffers_nx1_r2c_);
  deallocate(d_buffers_nx1_c2r_);
}
// 1D forward complex-to-complex FFT of length nx2_ (plan built in init()).
// NOTE(review): these use FFTW's new-array execute on caller buffers; the
// FFTW_ESTIMATE plans are presumably alignment-tolerant -- confirm against
// the FFTW "New-array Execute Functions" requirements.
void fft(complex64 *dptr_in, complex64 *dptr_out) {
  fftw_complex *in = reinterpret_cast<fftw_complex*>(dptr_in);
  fftw_complex *out = reinterpret_cast<fftw_complex*>(dptr_out);
  fftw_execute_dft(forward_c2c_plan_, in, out);
}

// 1D real-to-complex FFT: nx1_ reals in, nx1h_ complex values out.
void fftr2c(float64 *dptr_in, complex64 *dptr_out) {
  fftw_complex *out = reinterpret_cast<fftw_complex*>(dptr_out);
  fftw_execute_dft_r2c(forward_r2c_plan_, dptr_in, out);
}

// 1D backward complex-to-complex FFT of length nx2_.
void ifft(complex64 *dptr_in, complex64 *dptr_out) {
  fftw_complex *in = reinterpret_cast<fftw_complex*>(dptr_in);
  fftw_complex *out = reinterpret_cast<fftw_complex*>(dptr_out);
  fftw_execute_dft(backward_c2c_plan_, in, out);
}

// 1D complex-to-real inverse FFT: nx1h_ complex values in, nx1_ reals out.
void ifftc2r(complex64 *dptr_in, float64 *dptr_out) {
  fftw_complex *in = reinterpret_cast<fftw_complex*>(dptr_in);
  fftw_execute_dft_c2r(backward_c2r_plan_, in, dptr_out);
}
/* @brief 2D forward FFT wrapper; dispatches to the serial or batched
 * implementation depending on nb_batches_ (see fft2_serial/fft2_batch
 * for the exact buffer layouts).
 * In the host code, we assume LayoutRight (C style)
 */
void fft2(float64 *dptr_in, complex64 *dptr_out) {
  if(nb_batches_ == 1) {
    fft2_serial(dptr_in, dptr_out);
  }
  else {
    fft2_batch(dptr_in, dptr_out);
  }
}
/* @brief 2D inverse FFT wrapper; dispatches to the serial or batched
 * implementation depending on nb_batches_ (see ifft2_serial/ifft2_batch
 * for the exact buffer layouts).
 * In the host code, we assume LayoutRight (C style)
 * @param[in] dptr_in complex spectrum
 * @param[out] dptr_out real output
 */
void ifft2(complex64 *dptr_in, float64 *dptr_out) {
  if(nb_batches_ == 1) {
    ifft2_serial(dptr_in, dptr_out);
  }
  else {
    ifft2_batch(dptr_in, dptr_out);
  }
}
private:
/* @brief 2D FFT wrapper for the serial (non-batched) case
 * In the host code, we assume LayoutRight (C style)
 * @param[in] dptr_in[nx2,nx1]
 * @param[out] dptr_out[nx2,nx1h]
 */
void fft2_serial(float64 *dptr_in, complex64 *dptr_out) {
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    float64 *thread_private_buffer_nx1 = &thread_private_buffers_nx1_r2c_[nx1_*tid];
    complex64 *thread_private_buffer_nx1h = &thread_private_buffers_nx1h_[nx1h_*tid];
    complex64 *thread_private_buffer_nx2 = &thread_private_buffers_nx2_[nx2_*tid];

    // Fourier Transform in x direction
    #pragma omp for schedule(static)
    for(int ix2=0; ix2 < nx2_; ix2++) {
      // Gather one row into the thread-private buffer for the r2c FFT.
      for(int ix1=0; ix1 < nx1_; ix1++) {
        int idx = Index::coord_2D2int(ix1, ix2, nx1_, nx2_);
        thread_private_buffer_nx1[ix1] = dptr_in[idx];
      }
      fftr2c(thread_private_buffer_nx1, thread_private_buffer_nx1h);

      // Transpose [nx2,nx1h] -> [nx1h,nx2] into the shared scratch buffer
      // so the y-direction FFT below reads contiguous data.
      for(int ix1=0; ix1 < nx1h_; ix1++) {
        int idx = Index::coord_2D2int(ix2, ix1, nx2_, nx1h_);
        dptr_buffer_c_[idx] = thread_private_buffer_nx1h[ix1];
      }
    }
    // (implicit barrier at the end of the omp for: all writes to
    // dptr_buffer_c_ complete before the y-direction loop reads them)

    // Fourier Transform in y direction
    #pragma omp for schedule(static)
    for(int ix1=0; ix1 < nx1h_; ix1++) {
      int offset = nx2_ * ix1;
      fft(&dptr_buffer_c_[offset], thread_private_buffer_nx2);
      for(int ix2=0; ix2 < nx2_; ix2++) {
        int idx = Index::coord_2D2int(ix1, ix2, nx1h_, nx2_);
        dptr_out[idx] = thread_private_buffer_nx2[ix2];
      }
    }
    #pragma omp barrier
  }
}
/* @brief 2D FFT wrapper for batched case
 * In the host code, we assume LayoutRight (C style)
 * @param[in] dptr_in[batch,nx2,nx1]
 * @param[out] dptr_out[batch,nx2,nx1h]
 */
void fft2_batch(float64 *dptr_in, complex64 *dptr_out) {
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    float64 *thread_private_buffer_nx1 = &thread_private_buffers_nx1_r2c_[nx1_*tid];
    complex64 *thread_private_buffer_nx1h = &thread_private_buffers_nx1h_[nx1h_*tid];
    complex64 *thread_private_buffer_nx2 = &thread_private_buffers_nx2_[nx2_*tid];

    // Fourier Transform in x direction (row FFTs across all batches)
    #pragma omp for schedule(static), collapse(2)
    for(int ib=0; ib<nb_batches_; ib++) {
      for(int ix2=0; ix2 < nx2_; ix2++) {
        for(int ix1=0; ix1 < nx1_; ix1++) {
          int idx = Index::coord_3D2int(ix1, ix2, ib, nx1_, nx2_, nb_batches_);
          thread_private_buffer_nx1[ix1] = dptr_in[idx];
        }
        fftr2c(thread_private_buffer_nx1, thread_private_buffer_nx1h);

        // Transpose [batch,nx2,nx1h] -> [batch,nx1h,nx2] into the shared
        // scratch buffer for contiguous y-direction FFTs.
        for(int ix1=0; ix1 < nx1h_; ix1++) {
          int idx = Index::coord_3D2int(ix2, ix1, ib, nx2_, nx1h_, nb_batches_);
          dptr_buffer_c_[idx] = thread_private_buffer_nx1h[ix1];
        }
      }
    }
    // (implicit barrier at the end of the omp for separates the two phases)

    // Fourier Transform in y direction
    #pragma omp for schedule(static), collapse(2)
    for(int ib=0; ib<nb_batches_; ib++) {
      for(int ix1=0; ix1 < nx1h_; ix1++) {
        int offset = nx2_ * Index::coord_2D2int(ix1, ib, nx1h_, nb_batches_);
        fft(&dptr_buffer_c_[offset], thread_private_buffer_nx2);
        for(int ix2=0; ix2 < nx2_; ix2++) {
          int idx = Index::coord_3D2int(ix1, ix2, ib, nx1h_, nx2_, nb_batches_);
          dptr_out[idx] = thread_private_buffer_nx2[ix2];
        }
      }
    }
    #pragma omp barrier
  }
}
/* @brief 2D inverse FFT wrapper for the serial (non-batched) case
 * In the host code, we assume LayoutRight (C style)
 * @param[in] dptr_in[nx2,nx1h]
 * @param[out] dptr_out[nx2,nx1]
 */
void ifft2_serial(complex64 *dptr_in, float64 *dptr_out) {
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    // c2r buffer rows are padded to nx1_+2 (see init()).
    float64 *thread_private_buffer_nx1 = &thread_private_buffers_nx1_c2r_[(nx1_+2)*tid];
    complex64 *thread_private_buffer_nx2 = &thread_private_buffers_nx2_[nx2_*tid];
    complex64 *thread_private_buffer_nx2_out = &thread_private_buffers_nx2_out_[nx2_*tid];

    // Inverse Fourier Transform in y direction
    #pragma omp for schedule(static)
    for(int ix1=0; ix1 < nx1h_; ix1++) {
      for(int ix2=0; ix2 < nx2_; ix2++) {
        int idx = Index::coord_2D2int(ix1, ix2, nx1h_,nx2_);
        thread_private_buffer_nx2[ix2] = dptr_in[idx];
      }
      ifft(thread_private_buffer_nx2, thread_private_buffer_nx2_out);
      for(int ix2=0; ix2 < nx2_; ix2++) {
        int idx = Index::coord_2D2int(ix1, ix2, nx1h_, nx2_);
        dptr_buffer_c_[idx] = thread_private_buffer_nx2_out[ix2];
      }
    }
    // (implicit barrier at the end of the omp for separates the two phases)

    // Inverse Fourier Transform in x direction
    #pragma omp for schedule(static)
    for(int ix2=0; ix2 < nx2_; ix2++) {
      int offset_in = nx1h_ * ix2;
      ifftc2r(&dptr_buffer_c_[offset_in], thread_private_buffer_nx1);
      for(int ix1=0; ix1 < nx1_; ix1++) {
        int idx = Index::coord_2D2int(ix1, ix2, nx1_, nx2_);
        dptr_out[idx] = thread_private_buffer_nx1[ix1];
      }
    }
    #pragma omp barrier
  }
}
/* @brief 2D inverse FFT wrapper for batched case
 * In the host code, we assume LayoutRight (C style)
 * @param[in] dptr_in[batch,nx2,nx1h]
 * @param[out] dptr_out[batch,nx2,nx1]
 */
void ifft2_batch(complex64 *dptr_in, float64 *dptr_out) {
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    // c2r buffer rows are padded to nx1_+2 (see init()).
    float64 *thread_private_buffer_nx1 = &thread_private_buffers_nx1_c2r_[(nx1_+2)*tid];
    complex64 *thread_private_buffer_nx2 = &thread_private_buffers_nx2_[nx2_*tid];
    complex64 *thread_private_buffer_nx2_out = &thread_private_buffers_nx2_out_[nx2_*tid];

    // Inverse Fourier Transform in y direction
    #pragma omp for schedule(static), collapse(2)
    for(int ib=0; ib < nb_batches_; ib++) {
      for(int ix1=0; ix1 < nx1h_; ix1++) {
        for(int ix2=0; ix2 < nx2_; ix2++) {
          int idx = Index::coord_3D2int(ix1, ix2, ib, nx1h_, nx2_, nb_batches_);
          thread_private_buffer_nx2[ix2] = dptr_in[idx];
        }
        ifft(thread_private_buffer_nx2, thread_private_buffer_nx2_out);
        for(int ix2=0; ix2 < nx2_; ix2++) {
          int idx = Index::coord_3D2int(ix1, ix2, ib, nx1h_, nx2_, nb_batches_);
          dptr_buffer_c_[idx] = thread_private_buffer_nx2_out[ix2];
        }
      }
    }
    // (implicit barrier at the end of the omp for separates the two phases)

    // Inverse Fourier Transform in x direction
    #pragma omp for schedule(static), collapse(2)
    for(int ib=0; ib < nb_batches_; ib++) {
      for(int ix2=0; ix2 < nx2_; ix2++) {
        int offset = nx1h_ * Index::coord_2D2int(ix2, ib, nx2_, nb_batches_);
        ifftc2r(&dptr_buffer_c_[offset], thread_private_buffer_nx1);
        for(int ix1=0; ix1 < nx1_; ix1++) {
          int idx = Index::coord_3D2int(ix1, ix2, ib, nx1_, nx2_, nb_batches_);
          dptr_out[idx] = thread_private_buffer_nx1[ix1];
        }
      }
    }
    #pragma omp barrier
  }
}
/// One-time setup: derive half-spectrum sizes, build the four FFTW plans,
/// and allocate the shared spectral buffer plus one scratch lane per thread.
void init() {
  // An r2c transform of length n yields n/2+1 complex coefficients.
  nx1h_ = nx1_/2 + 1;
  nx2h_ = nx2_/2 + 1;
  assert(nb_batches_ >= 1);
  // Initialize fftw. The planner buffers below are throwaway scratch used
  // only to create the plans (FFTW_ESTIMATE does not require real data);
  // presumably the plans are later executed on the per-thread buffers via
  // FFTW's new-array execute interface -- TODO confirm in ifft/ifftc2r.
  fftw_complex *c_in, *c_out;
  fftw_complex *c_in_c2r, *c_out_r2c;
  float64 *in, *out;
  c_in = fftw_alloc_complex(nx2_);
  c_out = fftw_alloc_complex(nx2_);
  in = fftw_alloc_real(nx1_);
  // c2r output needs nx1_+2 reals: room for nx1h_ complex values in-place.
  out = fftw_alloc_real(nx1_+2);
  c_in_c2r = fftw_alloc_complex(nx1h_);
  c_out_r2c = fftw_alloc_complex(nx1h_);
  forward_c2c_plan_ = fftw_plan_dft_1d(nx2_, c_in, c_out, FFTW_FORWARD, FFTW_ESTIMATE);
  backward_c2c_plan_ = fftw_plan_dft_1d(nx2_, c_out, c_in, FFTW_BACKWARD, FFTW_ESTIMATE);
  forward_r2c_plan_ = fftw_plan_dft_r2c_1d(nx1_, in, c_out_r2c, FFTW_ESTIMATE);
  backward_c2r_plan_ = fftw_plan_dft_c2r_1d(nx1_, c_in_c2r, out, FFTW_ESTIMATE);
  fftw_free(in); fftw_free(out);
  fftw_free(c_in); fftw_free(c_out);
  fftw_free(c_in_c2r); fftw_free(c_out_r2c);
  // Query the OpenMP team size. BUGFIX: previously every thread of the
  // parallel region stored into nb_threads concurrently -- a data race
  // (undefined behavior) even though all threads wrote the same value.
  // Restrict the write to a single thread.
  size_t nb_threads = 0;
  #pragma omp parallel
  {
    #pragma omp single
    nb_threads = static_cast<size_t>( omp_get_num_threads() );
  }
  std::cout << "nb_threads = " << nb_threads << std::endl;
  // Malloc thread private buffers (one lane per thread, see ifft2_batch).
  size_t nx1 = nx1_, nx2 = nx2_, nx1h = nx1h_, nb_batches = nb_batches_;
  allocate(d_buffer_c_, {nx2, nx1h, nb_batches});
  allocate(d_thread_private_buffers_nx1h_, {nx1h,nb_threads});
  allocate(d_thread_private_buffers_nx2_, {nx2,nb_threads});
  allocate(d_thread_private_buffers_nx2_out_, {nx2,nb_threads});
  allocate(d_buffers_nx1_r2c_, {nx1, nb_threads});
  allocate(d_buffers_nx1_c2r_, {nx1+2,nb_threads});
  dptr_buffer_c_ = d_buffer_c_.raw();
  thread_private_buffers_nx1h_ = d_thread_private_buffers_nx1h_.raw();
  thread_private_buffers_nx2_ = d_thread_private_buffers_nx2_.raw();
  thread_private_buffers_nx2_out_ = d_thread_private_buffers_nx2_out_.raw();
  thread_private_buffers_nx1_r2c_ = d_buffers_nx1_r2c_.raw();
  thread_private_buffers_nx1_c2r_ = d_buffers_nx1_c2r_.raw();
}
};
};
#endif
|
openmp_worksharing.c | #include <stdio.h>
#include <omp.h>
#define ITERATIONS 100
int main(int argc, char const *argv[])
{
    /* Request a team of four threads for the parallel region below. */
    omp_set_num_threads(4);
    /* Work-share the loop: each thread executes a disjoint subset of the
     * ITERATIONS iteration indices. */
    #pragma omp parallel for
    for (int iter = 0; iter < ITERATIONS; iter++)
    {
        /* Report which thread handled this particular iteration. */
        printf("Iteration # %d from thread # %d\n", iter, omp_get_thread_num());
    }
    return 0;
}
|
ordering_op-inl.h | #include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file ordering_op-inl.h
* \brief Function definition of ordering operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
namespace mshadow {
// Reinterpret a contiguous tensor's memory under a new shape without copying.
// The returned tensor aliases src.dptr_ on the same stream; only contiguity
// is verified here -- the caller is responsible for matching element counts.
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
                                                   Shape<dst_dim> target_shape) {
  CHECK_EQ(src.CheckContiguous(), true);  // a strided view cannot be reshaped in place
  return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
};
namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
} // topk_enum
// Operator parameters for topk. Field semantics are spelled out in the
// DMLC_DECLARE_FIELD describe() strings below.
struct TopKParam : public dmlc::Parameter<TopKParam> {
  dmlc::optional<int> axis;  // axis to select along; unset/default -1 handling in ParseTopKParam
  int k;                     // number of elements to keep; k < 1 requests a full sort
  int ret_typ;               // one of topk_enum::TopKReturnType
  bool is_ascend;            // false -> pick the k largest
  int dtype;                 // dtype of the emitted indices (when indices are returned)
  DMLC_DECLARE_PARAMETER(TopKParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
    .describe("Axis along which to choose the top k indices."
              " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(k).set_default(1)
    .describe("Number of top elements to select,"
              " should be always smaller than or equal to the element number in the given axis."
              " A global sort is performed if set k < 1.");
    DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
    .add_enum("value", topk_enum::kReturnValue)
    .add_enum("indices", topk_enum::kReturnIndices)
    .add_enum("mask", topk_enum::kReturnMask)
    .add_enum("both", topk_enum::kReturnBoth)
    .describe("The return type.\n"
              " \"value\" means to return the top k values,"
              " \"indices\" means to return the indices of the top k values,"
              " \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
              " \"both\" means to return a list of both values and indices of top k elements.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(false)
    .describe("Whether to choose k largest or k smallest elements."
              " Top K largest elements will be chosen if set to false.");
    DMLC_DECLARE_FIELD(dtype)
    .add_enum("uint8", mshadow::kUint8)
    .add_enum("int32", mshadow::kInt32)
    .add_enum("float16", mshadow::kFloat16)
    .add_enum("float32", mshadow::kFloat32)
    .add_enum("float64", mshadow::kFloat64)
    .set_default(mshadow::kFloat32)
    .describe("DType of the output indices when ret_typ is \"indices\" or \"both\". "
              "An error will be raised if the selected data type cannot precisely represent the "
              "indices.");
  }
};
// Operator parameters for sort (implemented on top of topk, see Sort()).
struct SortParam : public dmlc::Parameter<SortParam> {
  dmlc::optional<int> axis;  // axis to sort along; default -1 (last / flattened)
  bool is_ascend;            // sort direction
  DMLC_DECLARE_PARAMETER(SortParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
    .describe("Axis along which to choose sort the input tensor."
              " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(true)
    .describe("Whether to sort in ascending or descending order.");
  }
};
// Operator parameters for argsort (implemented on top of topk, see ArgSort()).
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
  dmlc::optional<int> axis;  // axis to sort along; default -1 (last / flattened)
  bool is_ascend;            // sort direction
  int dtype;                 // dtype of the emitted index array
  DMLC_DECLARE_PARAMETER(ArgSortParam) {
    DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
    .describe("Axis along which to sort the input tensor."
              " If not given, the flattened array is used. Default is -1.");
    DMLC_DECLARE_FIELD(is_ascend).set_default(true)
    .describe("Whether to sort in ascending or descending order.");
    DMLC_DECLARE_FIELD(dtype)
    .add_enum("uint8", mshadow::kUint8)
    .add_enum("int32", mshadow::kInt32)
    .add_enum("float16", mshadow::kFloat16)
    .add_enum("float32", mshadow::kFloat32)
    .add_enum("float64", mshadow::kFloat64)
    .set_default(mshadow::kFloat32)
    .describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or"
              " \"both\". An error will be raised if the selected data type cannot precisely "
              "represent the indices.");
  }
};
/*!
 * \brief Decode a TopKParam against an input shape into the quantities the
 *        topk kernels need.
 * \param src_shape    shape of the source blob
 * \param param        raw topk parameters
 * \param target_shape shape of the result blob(s)
 * \param batch_size   number of independent rows to select from
 * \param element_num  number of candidates per row (size of the chosen axis,
 *                     or the total size when no axis is given)
 * \param axis         resolved non-negative axis (0 when no axis is given)
 * \param k            resolved k: param.k, or element_num when param.k <= 0
 * \param do_transpose true when the axis is not innermost, so the data must
 *                     be transposed before a batched sort
 * \param is_ascend    copy of param.is_ascend
 */
inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape,
                           int *batch_size, int *element_num, int *axis, int *k,
                           bool *do_transpose, bool *is_ascend) {
  *do_transpose = false;
  *k = param.k;
  *is_ascend = param.is_ascend;
  // get batch_size, axis and element_num
  if (!static_cast<bool>(param.axis)) {  // No axis given: operate on the flattened array
    *axis = 0;
    *batch_size = 1;
    *element_num = src_shape.Size();
  } else {
    *axis = param.axis.value();
    if (*axis < 0) {
      *axis += src_shape.ndim();  // python-style negative axis
    }
    // FIX: the old message said "between 0 and ndim", which is ambiguous
    // about the bounds; the valid range is the half-open [0, ndim).
    CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
      << "Invalid axis! axis should be in range [0, "
      << src_shape.ndim() << "), found axis=" << *axis;
    *batch_size = src_shape.Size() / src_shape[*axis];
    *element_num = src_shape[*axis];
    if (*axis != static_cast<int>(src_shape.ndim()) - 1) {
      *do_transpose = true;
    }
  }
  // get k: a non-positive k requests a full sort along the axis
  if (param.k <= 0) {
    *k = *element_num;
  }
  // get target_shape: "mask" keeps the source shape, otherwise the selected
  // axis shrinks to k (or the flat result is just Shape1(k)).
  if (!static_cast<bool>(param.axis)) {
    if (param.ret_typ != topk_enum::kReturnMask) {
      *target_shape = mshadow::Shape1(*k);
    } else {
      *target_shape = src_shape;
    }
  } else {
    *target_shape = src_shape;
    if (param.ret_typ != topk_enum::kReturnMask) {
      (*target_shape)[*axis] = *k;
    }
  }
  // FIX: the old message claimed k must be strictly "smaller than"
  // element_num, contradicting the k <= element_num check it guards.
  CHECK(*k >= 1 && *k <= *element_num)
    << "k must be in range [1, " << *element_num << "], get k = " << *k;
}
using namespace mshadow;
// Kernel functor: scatter ones -- out[indices[i]] = 1 for every i.
// Used to build the 0/1 result for the "mask" return type of topk.
struct fill_ind_to_one {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, const int* indices, DType* out) {
    out[indices[i]] = static_cast<DType>(1);
  }
};
// Kernel functor: scatter values -- assign val[i] into out[indices[i]],
// honoring the write request type (kWriteTo/kAddTo/...) via KERNEL_ASSIGN.
// Used by the topk backward pass to route gradients to selected positions.
struct fill_ind {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, const int* indices, const DType* val,
                                  int req, DType* out) {
    KERNEL_ASSIGN(out[indices[i]], req, val[i]);
  }
};
// CPU batched top-K sort. On entry `work` holds the flattened source values
// and `ind` holds the global indices 0..M*N-1 (filled by range_fwd); on exit
// the first K entries of each batch row of `ind` are the indices of the top-K
// elements and `dat` holds the corresponding values. Batches are independent
// and processed in parallel with OpenMP.
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
                                   const Tensor<cpu, 1, int>& ind,
                                   const Tensor<cpu, 1, char>& work,
                                   int K, int N, bool is_ascend,
                                   Stream<cpu> *s) {
  // Use full sort when K is relatively large.
  const bool full_sort(K*8 > N);
  // Batch size.
  const int M(work.size(0)/(sizeof(DType)*N));
  const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < M; ++i) {
    // Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
    // NOTE: `vals` points at the base of the whole buffer (not batch i) because
    // the entries of `indices` are global indices in [i*N, (i+1)*N).
    DType *vals = reinterpret_cast<DType*>(work.dptr_);
    DType *sorted_vals = dat.dptr_+i*N;
    int *indices = ind.dptr_+i*N;
    if (is_ascend) {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
      } else {
        // partial_sort: only the first K slots need to be ordered.
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
      }
    } else {
      if (full_sort) {
        std::sort(indices, indices+N,
                  [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
      } else {
        std::partial_sort(indices, indices+K, indices+N,
                          [&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
      }
    }
    // Gather the top-K values in sorted order.
    for (int j = 0; j < K; ++j) {
      sorted_vals[j] = vals[indices[j]];
    }
  }
}
#ifdef __HIPCC__
// Returns true when (val1, ind1) should be ranked strictly ahead of
// (val2, ind2) under the requested order.
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1, DType val2, int ind2, bool is_ascend) {
  // Negative indices denote undefined values which are considered arbitrary small resp. large.
  return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2)));
}
/*!
 * \brief In-place merge of two sorted top-K lists into val1/ind1.
 * Undefined slots are marked with negative indices (see TopKCompare).
 */
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1, DType *val2, int *ind2,
                               bool is_ascend) {
  // First determine the intervals [0,..,i1], [0,..i2] of the two lists that
  // will be part of the merged list: K tail elements total are dropped.
  int i1(K-1), i2(K-1);
  for (int i = 0; i < K; ++i) {
    if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
      --i2;
    } else {
      --i1;
    }
  }
  // Now merge the lists from back to front, always taking the currently
  // worse of the two candidates so the best ends up at the front.
  // FIX: parenthesized the condition explicitly; the original
  // `i2 < 0 || i1 >= 0 && ...` relied on operator precedence and triggers
  // -Wparentheses (same semantics, now unambiguous to readers).
  for (int i = K; i--;) {
    if (i2 < 0 || (i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend))) {
      val1[i] = val1[i1];
      ind1[i] = ind1[i1];
      --i1;
    } else {
      val1[i] = val2[i2];
      ind1[i] = ind2[i2];
      --i2;
    }
  }
}
// Device kernel: batched partial sort for small K, done entirely in shared
// memory. One thread block handles one batch item; each thread keeps a
// private sorted top-K list in shared memory, and the lists are then merged
// pairwise (tree reduction) into the block-wide result.
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
  // Buffer for pairwise reduction. Layout: blockDim.x*K ints (indices)
  // followed by blockDim.x*K DType values.
  HIP_DYNAMIC_SHARED( int, buff)
  // Start of buffer sections associated with this thread.
  const int offset(threadIdx.x*K);
  int *ind_buff = &buff[offset];
  DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
  // Initialize top-K values for this thread (-1 marks an empty slot).
  for (int i = 0; i < K; ++i) {
    ind_buff[i] = -1;
  }
  // Range of values this thread cares about. Each thread block processes
  // a different batch item (i.e. a different set of ind/val where we
  // have to select the top-K elements). All threads within the same
  // block work on the same batch item.
  const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
  // Select top-K from this range and store it sorted in the buffer.
  // We assume a small K, so linear insertion is o.k.
  for (int i = first; i < last; i += blockDim.x) {
    DType cur_val(val[i]);
    int cur_ind(ind[i]);
    // Shift weaker entries down one slot until the insertion point is found.
    for (int j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) {
      if (j+1 < K) {
        val_buff[j+1] = val_buff[j];
        ind_buff[j+1] = ind_buff[j];
      }
      val_buff[j] = cur_val;
      ind_buff[j] = cur_ind;
    }
  }
  // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
  // necessary a power of two, therefore the additional checks for last_s.
  for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x;
       last_s > 1; last_s = s, s = (s+1)/2) {
    __syncthreads();
    if (threadIdx.x < s && threadIdx.x+s < last_s) {
      MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
    }
  }
  // Final updates on master thread: write the merged top-K back to the
  // head of this batch item's segment of ind/val.
  if (threadIdx.x == 0) {
    for (int i = 0; i < K; ++i) {
      ind[blockIdx.x*N+i] = ind_buff[i];
      val[blockIdx.x*N+i] = val_buff[i];
    }
  }
}
// GPU batched top-K sort. Small K uses the shared-memory PartialSortSmallK
// kernel; otherwise a full stable sort-by-key pipeline is used, with batch
// ids as a secondary key to restore per-batch grouping.
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat,
                                   const Tensor<gpu, 1, int>& ind,
                                   const Tensor<gpu, 1, char>& work,
                                   int K, int N, bool is_ascend,
                                   Stream<gpu> *s) {
  // Use full sort for all but very small K for which we
  // can do a partial sort entirely within shared memory.
  const bool full_sort(K > 5);
  // Batch size.
  const int M(dat.size(0)/N);
  if (full_sort) {
    // Divide workspace into two parts. The first one is needed to store batch ids.
    size_t alignment = std::max(sizeof(DType), sizeof(int));
    size_t id_size = PadBytes(sizeof(int) * ind.size(0), alignment);
    Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s);
    Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
    // Global value sort first ...
    mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
    if (M > 1) {
      // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
      // ... then re-sort by batch id so each batch's top elements come first.
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
      batch_id = ind / N;
      mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
    }
  } else {
    // One block per batch item; dynamic shared memory holds per-thread
    // top-K index and value lists.
    const int nthreads(mshadow::cuda::kBaseThreadNum);
    hipLaunchKernelGGL((PartialSortSmallK), dim3(M), dim3(nthreads), nthreads*K*(sizeof(int)+sizeof(DType)), mshadow::Stream<gpu>::GetStream(s), K, N, dat.dptr_, ind.dptr_, is_ascend);
  }
}
#endif
/*!
 * \brief Implementation of the TopK operation
 *
 * Carves the requested workspace into sorted-value / index / scratch
 * regions, runs the device-specific batched TopKSort, then assigns the
 * result blob(s) according to param.ret_typ.
 *
 * \param ctx the running context
 * \param resource temporary resource handler
 * \param req write request types for the output blob(s)
 * \param src the Source blob
 * \param ret the destination blobs
 * \param param the topk parameters (k, axis, return type, order)
 * \tparam xpu the device type.
 * \tparam DType type of the output value/mask.
 * \tparam IDType type of the output indices.
 */
template<typename xpu, typename DType, typename IDType>
void TopKImpl(const RunContext &ctx,
              const Resource &resource,
              const std::vector<OpReqType>& req,
              const TBlob& src,
              const std::vector<TBlob>& ret,
              const TopKParam& param) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 1. Parse and initialize information
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, int> indices, sel_indices;
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(int));
  TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  // Refuse index dtypes that cannot represent every position along the axis.
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
    << "'IDType' does not have a sufficient precision to represent the indices of the input array. "
    << "The total element_num is " << element_num << ", but the selected IDType can only represent "
    << mxnet::common::MaxIntegerValue<IDType>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  // Compute the scratch requirement as the max over all sort configurations.
  size_t temp_size = 0;
  // Temp space needed by the gpu-based full sorts.
  temp_size = std::max(temp_size,
                       mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size()));
  temp_size = std::max(temp_size,
                       mxnet::op::SortByKeyWorkspaceSize<int, DType, xpu>(src.Size()));
  temp_size = std::max(temp_size,
                       mxnet::op::SortByKeyWorkspaceSize<DType, int, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += PadBytes(sizeof(int) * src.Size(), alignment);
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, static_cast<size_t>(sizeof(DType) * src.Size()));
  // Total: scratch + sorted values + index array (+ selected indices for mask).
  size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                          + PadBytes(sizeof(int) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += PadBytes(sizeof(int) * batch_size * k, alignment);
  }
  workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
  char* workspace_curr_ptr = workspace.dptr_;
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
  indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += PadBytes(sizeof(int) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
                                      Shape1(batch_size * k), s);
    workspace_curr_ptr += PadBytes(sizeof(int) * batch_size * k, alignment);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
  }
  // Device-specific staging: the CPU path sorts indices against a flattened
  // copy of the data; the GPU path sorts the data copy itself.
  if (std::is_same<xpu, cpu>::value) {
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      // Bring the selection axis innermost before flattening.
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }
  // Fill `indices` with 0..batch_size*element_num-1 (global positions).
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1,
                                           kWriteTo, indices.dptr_);
  CHECK_EQ(indices.CheckContiguous(), true);
  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
    ret_mask = scalar<DType>(0);
    // Keep only the first k global indices of every batch row.
    sel_indices = reshape(slice<1>(
        inplace_reshape(indices,
                        Shape2(batch_size,
                               element_num)), 0, k),
        Shape1(batch_size * k));
    if (do_transpose) {
      TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      // Map indices from the transposed layout back to the original layout.
      sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
                                                     sel_indices.dptr_, ret_mask.dptr_);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    // Global indices are reduced modulo element_num to per-axis positions.
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices =
          ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
          element_num)));
    }
  } else {
    // kReturnValue / kReturnBoth: emit values (and, for both, indices too).
    if (do_transpose) {
      Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
      Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_value, req[0], transpose(
          slice<2>(inplace_reshape(sorted_dat,
                                   Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
                   0, k), Shape3(0, 2, 1)));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k), Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, DType> ret_value =
          ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, IDType> ret_indices =
          ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_value, req[0],
                      slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  }
}
// nnvm forward entry point for topk: decodes the parsed parameters and
// dispatches TopKImpl on the runtime value/index dtypes. float16 input is
// rejected by MXNET_NO_FLOAT16_TYPE_SWITCH.
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) {
    // Indices are emitted: honor the user-selected index dtype.
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
      })
    });
  } else {
    // value/mask only: internal indices stay int.
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
    });
  }
}
// nnvm forward entry point for sort: implemented as a full topk
// (k = 0 is expanded to element_num by ParseTopKParam) returning values.
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;  // non-positive k -> full sort along the axis
  topk_param.ret_typ = topk_enum::kReturnValue;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param);
  });
}
// nnvm forward entry point for argsort: implemented as a full topk
// (k = 0 -> element_num) returning indices in the requested dtype.
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;  // non-positive k -> full sort along the axis
  topk_param.dtype = param.dtype;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
      TopKImpl<xpu, DType, IDType>(ctx.run_ctx,
                                   ctx.requested[0], req, inputs[0], outputs, topk_param);
    });
  });
}
// Backward pass for topk with ret_typ "value" or "both": scatter the output
// gradient back to the positions that were selected in the forward pass.
// inputs[0] = out_grad, inputs[2] = forward indices; outputs[0] = in_grad.
template<typename xpu, typename DType, typename IDType>
void TopKBackwardImpl(const OpContext &ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs,
                      const TopKParam& param) {
  CHECK_NE(req[0], kWriteInplace);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
  CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
  int batch_size, element_num;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  int k = 0;
  TShape target_shape;
  // Re-derive the forward configuration from the *input* (in_grad) shape.
  ParseTopKParam(outputs[0].shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
    << "'IDType' does not have a sufficient precision to represent the indices of the input array. "
    << "The total element_num is " << element_num << ", but the selected IDType can only represent "
    << mxnet::common::MaxIntegerValue<IDType>() << " elements";
  // Workspace layout: [batch_size*k selected indices | batch_size row offsets].
  Tensor<xpu, 1, int> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, int>(Shape1(batch_size * k + batch_size), s);
  Tensor<xpu, 1, int> sel_indices =
      Tensor<xpu, 1, int>(workspace.dptr_, Shape1(batch_size * k), s);
  Tensor<xpu, 1, int> batch_shift =
      Tensor<xpu, 1, int>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
  Tensor<xpu, 2, DType> out_grad =
      inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
  Tensor<xpu, 2, DType> in_grad =
      outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
  // batch_shift[b] = b * element_num: start of batch b in the flat input.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0, element_num, kWriteTo,
                                           batch_shift.dptr_);
  // Convert the per-axis forward indices into global flat positions.
  if (do_transpose) {
    Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
    TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
    sel_indices = reshape(transpose(
                              broadcast_to(inplace_reshape(batch_shift,
                                                           Shape3(src_shape[0], src_shape[2], 1)),
                                           TShape(Shape3(src_shape[0], src_shape[2], k))),
                              Shape3(0, 2, 1)),
                          Shape1(batch_size * k));
    sel_indices += tcast<int>(indices);
    // Map from the transposed layout back to the original layout.
    sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                    Shape3(0, 2, 1));
  } else {
    Tensor<xpu, 2, IDType> indices =
        inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
    sel_indices = reshape(tcast<int>(indices) +
                          broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
                                       TShape(Shape2(batch_size, k))),
                          Shape1(batch_size * k));
  }
  CHECK_EQ(sel_indices.CheckContiguous(), true);
  if (kWriteTo == req[0] || kAddTo == req[0]) {
    if (kWriteTo == req[0]) {
      // Unselected positions receive zero gradient.
      in_grad = scalar<DType>(0);
    }
    // Scatter: in_grad[sel_indices[i]] (+)= out_grad[i].
    mxnet_op::Kernel<fill_ind, xpu>::Launch(s, batch_size * k,
                                            sel_indices.dptr_,
                                            out_grad.dptr_,
                                            req[0],
                                            in_grad.dptr_);
  } else {
    LOG(FATAL) << "Not Implemented!";
  }
}
// nnvm backward entry point for topk. Only "value"/"both" return types have
// a defined gradient; "indices"/"mask" hit LOG(FATAL).
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnBoth) {
    // "both" stored indices in the user-selected dtype.
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
        TopKBackwardImpl<xpu, DType, IDType>(ctx, inputs, req, outputs, param);
      });
    });
  } else if (param.ret_typ == topk_enum::kReturnValue) {
    // "value" keeps its hidden index output as int.
    MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      TopKBackwardImpl<xpu, DType, int>(ctx, inputs, req, outputs, param);
    });
  } else {
    LOG(FATAL) << "Not Implemented";
  }
}
// Number of output blobs topk produces: one for the "indices" and "mask"
// return types, a (value, indices) pair for "value" and "both".
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  const bool single_output = param.ret_typ == topk_enum::kReturnIndices ||
                             param.ret_typ == topk_enum::kReturnMask;
  return single_output ? 1U : 2U;
}
// Number of outputs exposed to the user: only "both" surfaces two blobs
// (compare TopKNumOutputs, where "value" also allocates a second blob).
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  return param.ret_typ == topk_enum::kReturnBoth ? 2U : 1U;
}
// Type inference for topk: index outputs take param.dtype ("value" pairs use
// int32 for the hidden index blob); value/mask outputs unify with the input.
inline bool TopKType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  int data_type = -1;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK(out_size == 1 || out_size == 2);
  if (out_size > 1) {
    if (param.ret_typ == topk_enum::kReturnValue) {
      // The second (hidden) output of "value" is always int32.
      CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
        << "Failed to set the type of ret_indices.";
    } else {
      CHECK(type_assign(&(*out_attrs)[1], param.dtype))
        << "Failed to set the type of ret_indices.";
    }
  }
  if (param.ret_typ == topk_enum::kReturnIndices) {
    CHECK(type_assign(&(*out_attrs)[0], param.dtype))
      << "Failed to set the type of ret_indices.";
  } else {
    // Unify input dtype and the value/mask output dtype in both directions.
    CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
                                                   << (*in_attrs)[0];
    CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
                                                    << (*out_attrs)[0];
    CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
                                                   << (*in_attrs)[0];
    CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
                                                    << (*out_attrs)[0];
    if (data_type == -1) return false;  // still unknown; retry later
  }
  return true;
}
inline bool TopKShapeImpl(const TopKParam& param,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
CHECK_EQ(out_attrs->size(), 1U);
} else {
CHECK_EQ(out_attrs->size(), 2U);
}
TShape& in_shape = (*in_attrs)[0];
int batch_size, element_num; // number of batches + the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
int k = 0;
TShape target_shape;
ParseTopKParam(in_shape, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
} else {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
}
return true;
}
// Thin nnvm adapter: decode the parsed TopKParam and defer to the shared
// shape-inference implementation.
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
                      std::vector<TShape> *in_attrs,
                      std::vector<TShape> *out_attrs) {
  return TopKShapeImpl(nnvm::get<TopKParam>(attrs.parsed), in_attrs, out_attrs);
}
// Type inference for sort: out[1] (the hidden index blob) is always int32;
// out[0] (sorted values) unifies with the input dtype in both directions.
inline bool SortType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  int data_type = -1;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK_EQ(out_size, 2);
  CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
    << "Failed to set the type of ret_indices to int32.";
  CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
                                                 << (*in_attrs)[0];
  CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
                                                  << (*out_attrs)[0];
  CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
                                                 << (*in_attrs)[0];
  CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
                                                  << (*out_attrs)[0];
  if (data_type == -1) return false;  // dtype still unknown; retry later
  return true;
}
// Shape inference for sort: translate SortParam into an equivalent full-sort
// TopKParam (k = 0 -> full axis) and reuse the topk shape logic.
inline bool SortShape(const nnvm::NodeAttrs& attrs,
                      std::vector<TShape> *in_attrs,
                      std::vector<TShape> *out_attrs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnValue;
  return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
// Type inference for the ArgSort operator.
// The output dtype comes from the user-supplied `dtype` parameter, which is
// not necessarily int32, so the error message must not claim int32 (the old
// text was copied from SortType, where the indices output really is int32).
inline bool ArgSortType(const nnvm::NodeAttrs& attrs,
                        std::vector<int> *in_attrs,
                        std::vector<int> *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  CHECK(type_assign(&(*out_attrs)[0], param.dtype))
      << "Failed to set the type of ret_indices.";
  return true;
}
// Shape inference for the ArgSort operator.  ArgSort is shape-wise a TopK
// that returns only indices, so build an equivalent TopKParam (k = 0,
// ret_typ = kReturnIndices) and reuse TopKShapeImpl.
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
                         std::vector<TShape> *in_attrs,
                         std::vector<TShape> *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam proxy;
  proxy.ret_typ = topk_enum::kReturnIndices;
  proxy.k = 0;
  proxy.axis = param.axis;
  proxy.is_ascend = param.is_ascend;
  return TopKShapeImpl(proxy, in_attrs, out_attrs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
|
opencl_office2010_fmt_plug.c | /* MS Office 2010 cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>
*
* OpenCL support by magnum.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Copyright (c) 2012, magnum and it is hereby released to the general public
* under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_office2010;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_office2010);
#else
#include "sha.h"
#include <openssl/aes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "common-opencl.h"
#include "config.h"
#define PLAINTEXT_LENGTH 51
#define UNICODE_LENGTH 104 /* In octets, including 0x80 */
#define FORMAT_LABEL "office2010-opencl"
#define FORMAT_NAME "MS Office 2010"
#define OCL_ALGORITHM_NAME "SHA1 OpenCL"
#define CPU_ALGORITHM_NAME " AES"
#define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (100,000 iterations)"
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_LENGTH 16
#define SALT_SIZE sizeof(*cur_salt)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MIN(a, b) (((a) > (b)) ? (b) : (a))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
/*
 * Built-in self-test vectors: the same password for each Office 2010
 * container type (docx, dotx, xlsb, xlsx).  Hash layout:
 *   $office$*version*spinCount*keySize*saltSize*salt*encVerifier*encVerifierHash
 */
static struct fmt_tests tests[] = {
	/* 2010-Default_myhovercraftisfullofeels_.docx */
	{"$office$*2010*100000*128*16*213aefcafd9f9188e78c1936cbb05a44*d5fc7691292ab6daf7903b9a8f8c8441*46bfac7fb87cd43bd0ab54ebc21c120df5fab7e6f11375e79ee044e663641d5e", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.dotx */
	{"$office$*2010*100000*128*16*0907ec6ecf82ede273b7ee87e44f4ce5*d156501661638cfa3abdb7fdae05555e*4e4b64e12b23f44d9a8e2e00196e582b2da70e5e1ab4784384ad631000a5097a", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.xlsb */
	{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*00780eeb9605c7e27227c5619e91dc21*90aaf0ea5ccc508e699de7d62c310f94b6798ae77632be0fc1a0dc71600dac38", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.xlsx */
	{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*ef51883a775075f30d2207e87987e6a3*a867f87ea955d15d8cb08dc8980c04bf564f8af060ab61bf7fa3543853e0d11a", "myhovercraftisfullofeels"},
	/* terminator */
	{NULL}
};
/* Parsed form of one "$office$*..." ciphertext; filled in by get_salt(). */
static struct custom_salt {
	char unsigned osalt[SALT_LENGTH];        /* raw salt bytes */
	char unsigned encryptedVerifier[16];     /* AES-encrypted verifier input */
	char unsigned encryptedVerifierHash[32]; /* AES-encrypted SHA-1 of verifier */
	int version;                             /* format version field (2010 here) */
	int spinCount;                           /* hash iteration count (cost) */
	int keySize;                             /* AES key size in bits: 128 or 256 */
	int saltSize;                            /* number of salt bytes actually used */
} *cur_salt;

/* Host-side crack state: per-candidate hit flags plus a summary flag. */
static int *cracked, any_cracked;
static unsigned int v_width = 1; /* Vector width of kernel */
static char *saved_key;	/* Password encoded in UCS-2 */
static int *saved_len;	/* UCS-2 password length, in octets */
static char *saved_salt; /* host copy of the currently-set salt */
static unsigned char *key;	/* Output key from kernel */
static int new_keys, spincount; /* dirty flag for key transfer; current spin count */

/* Device buffers and their pinned, host-mapped counterparts. */
static cl_mem cl_saved_key, cl_saved_len, cl_salt, cl_pwhash, cl_key, cl_spincount;
static cl_mem pinned_saved_key, pinned_saved_len, pinned_salt, pinned_key;

/* The three kernels: password SHA-1, iterated hash loop (crypt_kernel,
 * declared by shared OpenCL code), and the final 2010 key derivation. */
static cl_kernel GenerateSHA1pwhash, Generate2010key;
#define HASH_LOOPS 500 /* Lower figure gives less X hogging */
#define ITERATIONS 100000
#define STEP 0
#define SEED 128
#define OCL_CONFIG "office2010"
static const char * warn[] = {
"xfer: ", ", xfer: ", ", init: ", ", loop: ", ", final: ", ", xfer: "
};
static int split_events[] = { 3, -1, -1 };
//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
/* Largest local work size supported by all three kernels: query each one
 * and keep the smallest answer. */
static size_t get_task_max_work_group_size()
{
	size_t limit, cand;

	limit = autotune_get_task_max_work_group_size(FALSE, 0, GenerateSHA1pwhash);
	cand = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	if (cand < limit)
		limit = cand;
	cand = autotune_get_task_max_work_group_size(FALSE, 0, Generate2010key);
	if (cand < limit)
		limit = cand;
	return limit;
}
/* This format imposes no hard cap on the global work size (0 == unlimited,
 * the auto-tuner decides). */
static size_t get_task_max_size()
{
	size_t unlimited = 0;

	return unlimited;
}
/* Default local work size: tiny for CPU OpenCL devices (Intel's CPU runtime
 * prefers 8, other CPU runtimes 1), and 64 for GPUs. */
static size_t get_default_workgroup()
{
	if (cpu(device_info[gpu_id]))
		return get_platform_vendor_id(platform_id) == DEV_INTEL ?
			8 : 1;
	else
		return 64;
}
/* Allocate all device buffers for `gws` work-items (scaled up by the kernel
 * vector width) plus pinned, permanently-mapped host mirrors for the
 * host-filled inputs, then bind the static kernel arguments.
 * Called by the auto-tuner (possibly repeatedly, paired with
 * release_clobj()). */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	int i;
	/* Pre-seed lengths with the self-test password length (in octets),
	 * so benchmarking works before set_key() was ever called. */
	int bench_len = strlen(tests[0].plaintext) * 2;

	/* One vectorized work-item processes v_width candidates. */
	gws *= v_width;

	/* UCS-2 password buffer: pinned host mirror + device copy. */
	pinned_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, UNICODE_LENGTH * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, UNICODE_LENGTH * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	saved_key = (char*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, UNICODE_LENGTH * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_key");
	memset(saved_key, 0, UNICODE_LENGTH * gws);

	/* Password length buffer (octets of UCS-2). */
	pinned_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_int) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(cl_int) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	saved_len = (int*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_len, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_int) * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_len");
	for (i = 0; i < gws; i++)
		saved_len[i] = bench_len;

	/* Salt buffer (fixed SALT_LENGTH bytes). */
	pinned_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, SALT_LENGTH, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, SALT_LENGTH, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	saved_salt = (char*) clEnqueueMapBuffer(queue[gpu_id], pinned_salt, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, SALT_LENGTH, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_salt");
	memset(saved_salt, 0, SALT_LENGTH);

	/* Intermediate hash state kept on-device between kernel invocations
	 * (6 uints per scalar candidate). */
	cl_pwhash = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_uint) * 6 * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device state buffer");

	/* Output: 32 bytes of derived key material per candidate. */
	pinned_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 32 * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating page-locked memory");
	cl_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 32 * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating device memory");
	key = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 32 * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory verifier keys");
	memset(key, 0, 32 * gws);

	/* Spin count lives in a single-int buffer backed by host memory. */
	cl_spincount = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, sizeof(cl_int), &spincount, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping spincount");

	/* Static kernel arguments (buffers never change identity after this). */
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 0, sizeof(cl_mem), (void*)&cl_saved_key), "Error setting argument 0");
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 1, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 1");
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 2, sizeof(cl_mem), (void*)&cl_salt), "Error setting argument 2");
	HANDLE_CLERROR(clSetKernelArg(GenerateSHA1pwhash, 3, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 3");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");
	HANDLE_CLERROR(clSetKernelArg(Generate2010key, 0, sizeof(cl_mem), (void*)&cl_pwhash), "Error setting argument 0");
	HANDLE_CLERROR(clSetKernelArg(Generate2010key, 1, sizeof(cl_mem), (void*)&cl_key), "Error setting argument 1");
	HANDLE_CLERROR(clSetKernelArg(Generate2010key, 2, sizeof(cl_mem), (void*)&cl_spincount), "Error setting argument 2");

	cracked = mem_alloc(sizeof(*cracked) * gws);
}
/* Undo create_clobj(): unmap the pinned buffers, wait for the unmaps to
 * complete (clFinish), then release every cl_mem object and the host-side
 * cracked array. */
static void release_clobj(void)
{
	HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_key, key, 0, NULL, NULL), "Error Unmapping key");
	HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_key, saved_key, 0, NULL, NULL), "Error Unmapping saved_key");
	HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_len, saved_len, 0, NULL, NULL), "Error Unmapping saved_len");
	HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_salt, saved_salt, 0, NULL, NULL), "Error Unmapping saved_salt");
	/* Ensure the unmaps finished before the objects are destroyed. */
	HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings");

	HANDLE_CLERROR(clReleaseMemObject(cl_spincount), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_saved_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_saved_len), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(pinned_salt), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_saved_key), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_saved_len), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_salt), "Release GPU buffer");
	HANDLE_CLERROR(clReleaseMemObject(cl_pwhash), "Release GPU buffer");

	MEM_FREE(cracked);
}
/* Format teardown: release buffers first, then the kernels and program. */
static void done(void)
{
	release_clobj();

	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(GenerateSHA1pwhash), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(Generate2010key), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}
/* Zero the whole key and length arrays for the current (scalar) global
 * work size, so stale candidates from a previous batch cannot leak in. */
static void clear_keys(void)
{
	memset(saved_key, 0, UNICODE_LENGTH * global_work_size * v_width);
	memset(saved_len, 0, sizeof(*saved_len) * global_work_size * v_width);
}
/* Store one candidate password at `index`, converted to UTF-16LE, and mark
 * the key buffer dirty so crypt_all() re-uploads it. */
static void set_key(char *key, int index)
{
	UTF16 *utfkey = (UTF16*)&saved_key[index * UNICODE_LENGTH];

	/* convert key to UTF-16LE; a negative return means the conversion was
	 * truncated, in which case we take the length of what was written */
	saved_len[index] = enc_to_utf16(utfkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(utfkey);

	/* Prepare for GPU: append the 0x80 that starts SHA-1 padding, so the
	 * kernel does not have to */
	utfkey[saved_len[index]] = 0x80;

	/* length is kept in octets, not UTF-16 code units */
	saved_len[index] <<= 1;

	new_keys = 1;
}
/* Parse a (pre-validated) "$office$*..." ciphertext into a custom_salt.
 * The salt is allocated from the tiny-allocation pool (mem_calloc_tiny),
 * which is presumably freed in bulk at shutdown -- not per salt. */
static void *get_salt(char *ciphertext)
{
	int i, length;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy, *p;

	cur_salt = mem_calloc_tiny(sizeof(struct custom_salt),
		MEM_ALIGN_WORD);
	ctcopy += 9; /* skip over "$office$*" */

	/* numeric header fields, in order */
	p = strtok(ctcopy, "*");
	cur_salt->version = atoi(p);
	p = strtok(NULL, "*");
	cur_salt->spinCount = atoi(p);
	p = strtok(NULL, "*");
	cur_salt->keySize = atoi(p);
	p = strtok(NULL, "*");
	cur_salt->saltSize = atoi(p);
	if (cur_salt->saltSize > SALT_LENGTH) {
		fprintf(stderr, "** error: salt longer than supported:\n%s\n", ciphertext);
		cur_salt->saltSize = SALT_LENGTH; /* will not work, but protects us from segfault */
	}

	/* hex-decoded binary fields */
	p = strtok(NULL, "*");
	for (i = 0; i < cur_salt->saltSize; i++)
		cur_salt->osalt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	for (i = 0; i < 16; i++)
		cur_salt->encryptedVerifier[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	length = strlen(p) / 2;
	for (i = 0; i < length; i++)
		cur_salt->encryptedVerifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)cur_salt;
}
/* Select the active salt: copy the salt bytes into the mapped host buffer
 * and queue non-blocking uploads of both the salt and the spin count. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy(saved_salt, cur_salt->osalt, SALT_LENGTH);
	spincount = cur_salt->spinCount;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, SALT_LENGTH, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt");
	/* the literal 4 assumes sizeof(cl_int) == 4, which OpenCL guarantees */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_spincount, CL_FALSE, 0, 4, &spincount, 0, NULL, NULL), "failed in clEnqueueWriteBuffer spincount");
}
/* Forward declarations: init() temporarily swaps in a lightweight
 * benchmark variant of crypt_all for auto-tuning. */
static int crypt_all(int *pcount, struct db_salt *salt);
static int crypt_all_benchmark(int *pcount, struct db_salt *salt);

/* One-time format setup: detect the device's preferred vector width, build
 * the OpenCL program, create the three kernels (password SHA-1, iterated
 * hash loop, final 2010 key derivation), then auto-tune LWS/GWS. */
static void init(struct fmt_main *self)
{
	char build_opts[64];
	static char valgo[32] = "";

	if ((v_width = opencl_get_vector_width(gpu_id,
		sizeof(cl_int))) > 1) {
		/* Run vectorized kernel; advertise the vector width in the
		 * algorithm name shown to the user */
		snprintf(valgo, sizeof(valgo),
			OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, v_width);
		self->params.algorithm_name = valgo;
	}

	snprintf(build_opts, sizeof(build_opts),
		"-DHASH_LOOPS=%u -DUNICODE_LENGTH=%u -DV_WIDTH=%u",
		HASH_LOOPS,
		UNICODE_LENGTH,
		v_width);
	opencl_init("$JOHN/kernels/office2010_kernel.cl", gpu_id,
		build_opts);

	// create kernel to execute
	GenerateSHA1pwhash = clCreateKernel(program[gpu_id], "GenerateSHA1pwhash", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
	crypt_kernel = clCreateKernel(program[gpu_id], "HashLoop", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
	Generate2010key = clCreateKernel(program[gpu_id], "Generate2010key", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");

	// Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(SEED, HASH_LOOPS, split_events,
		warn, 3, self, create_clobj, release_clobj,
		UNICODE_LENGTH, 0);

	// Auto tune execution from shared/included code, using the benchmark
	// variant of crypt_all (no CPU-side verification).
	self->methods.crypt_all = crypt_all_benchmark;
	autotune_run(self, ITERATIONS + 4, 0,
		(cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL));
	self->methods.crypt_all = crypt_all;

	self->params.min_keys_per_crypt = local_work_size * v_width;
	self->params.max_keys_per_crypt = global_work_size * v_width;

	/* With UTF-8 input, allow up to 3 octets per UTF-16 character */
	if (pers_opts.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}
/* Return nonzero iff the string consists solely of hex digits (per the
 * atoi16 table, where 0x7F marks a non-hex character).  An empty string
 * is accepted. */
static int ishex(char *q)
{
	for (; atoi16[ARCH_INDEX(*q)] != 0x7F; q++)
		;
	return *q == 0;
}
/* Validate a "$office$*2010*..." ciphertext line without building a salt:
 * check the field count, the key size (128/256), the salt size (only 16 is
 * supported) and that all binary fields are pure hex.
 * Returns 1 when the hash looks usable, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *ptr, *keeptr;
	int res;

	if (strncmp(ciphertext, "$office$*2010*", 14))
		return 0;
	if (!(ctcopy = strdup(ciphertext))) {
		fprintf(stderr, "Memory allocation failed in %s, unable to check if hash is valid!", FORMAT_LABEL);
		return 0;
	}
	keeptr = ctcopy;
	/* Skip the "$office$*2010*" tag, which is 14 octets long.  The old
	 * code skipped 15, eating the first digit of the spin count and
	 * rejecting otherwise-valid hashes with a single-digit spin count. */
	ctcopy += 14;
	if (!(ptr = strtok(ctcopy, "*"))) /* spin count (iterations) */
		goto error;
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strncmp(ptr, "128", 3) && strncmp(ptr, "256", 3)) /* key size */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* salt size */
		goto error;
	res = atoi(ptr);
	if (res != 16) /* can we handle other values? */
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* salt */
		goto error;
	if (strlen(ptr) != res * 2)
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* encrypted verifier */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*"))) /* encrypted verifier hash */
		goto error;
	if (!ishex(ptr))
		goto error;
	if (strlen(ptr) > 64)
		goto error;
	if ((ptr = strtok(NULL, "*"))) /* no trailing garbage allowed */
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}
/* AES-CBC-decrypt `length` bytes of `encryptedVerifier` into
 * `decryptedVerifier`, using the current salt as IV (zero-padded to the
 * 32-byte iv buffer) and `verifierInputKey` as the AES key.
 * The key size (128 or 256 bits) comes from the current salt.
 * Fixed: the output parameter was declared const and the const was cast
 * away at the call site -- it is a plain output buffer. */
static void DecryptUsingSymmetricKeyAlgorithm(unsigned char *verifierInputKey, unsigned char *encryptedVerifier, unsigned char *decryptedVerifier, int length)
{
	unsigned char iv[32];
	AES_KEY akey;
	/* anything other than 128 is treated as 256 (valid() only admits
	 * those two values) */
	int key_bits = (cur_salt->keySize == 128) ? 128 : 256;

	/* CBC IV = raw salt, zero-padded */
	memcpy(iv, cur_salt->osalt, 16);
	memset(&iv[16], 0, 16);

	memset(&akey, 0, sizeof(AES_KEY));
	if (AES_set_decrypt_key(verifierInputKey, key_bits, &akey) < 0) {
		fprintf(stderr, "AES_set_decrypt_key failed!\n");
	}
	AES_cbc_encrypt(encryptedVerifier, decryptedVerifier, length, &akey, iv, AES_DECRYPT);
}
/* Main crack loop: derive the two AES verifier keys for every candidate on
 * the GPU, then verify each candidate on the CPU (AES decrypt + SHA-1
 * compare).  Returns the number of candidates processed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;
	size_t gws, scalar_gws;

	/* Round the candidate count up to a whole number of (vectorized)
	 * work-groups. */
	gws = ((count + (v_width * local_work_size - 1)) / (v_width * local_work_size)) * local_work_size;
	scalar_gws = gws * v_width;

	if (any_cracked) {
		memset(cracked, 0, count * sizeof(*cracked));
		any_cracked = 0;
	}

	/* Keys are only transferred when set_key() marked them dirty. */
	if (new_keys) {
		HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_key");
		HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_len");
		new_keys = 0;
	}

	/* Stage 1: initial SHA-1 of salt + password. */
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA1pwhash, 1, NULL, &scalar_gws, &local_work_size, 0, NULL, firstEvent), "failed in clEnqueueNDRangeKernel");

	/* Stage 2: the spin loop, split into HASH_LOOPS-sized chunks so the
	 * GPU is not hogged.  NOTE(review): any remainder of
	 * spincount % HASH_LOOPS is silently dropped -- fine for the fixed
	 * 100000 spin count, but confirm for other inputs. */
	for (index = 0; index < spincount / HASH_LOOPS; index++) {
		HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel");
		HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
		opencl_process_event();
	}

	/* Stage 3: derive the two 16-byte verifier keys per candidate. */
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2010key, 1, NULL, &gws, &local_work_size, 0, NULL, lastEvent), "failed in clEnqueueNDRangeKernel");

	// read back verifier keys (blocking: synchronizes the queue)
	HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 32 * scalar_gws, key, 0, NULL, NULL), "failed in reading key back");

	/* CPU-side verification: decrypt the verifier input and hash with the
	 * two derived keys and compare SHA-1(input) against the hash. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		SHA_CTX ctx;
		unsigned char hash[20];
		unsigned char decryptedVerifierHashInputBytes[16], decryptedVerifierHashBytes[32];

		DecryptUsingSymmetricKeyAlgorithm(&key[32*index], cur_salt->encryptedVerifier, decryptedVerifierHashInputBytes, 16);
		DecryptUsingSymmetricKeyAlgorithm(&key[32*index+16], cur_salt->encryptedVerifierHash, decryptedVerifierHashBytes, 32);
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, decryptedVerifierHashInputBytes, 16);
		SHA1_Final(hash, &ctx);
		if (!memcmp(hash, decryptedVerifierHashBytes, 20))
		{
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
/* Lightweight crypt_all used only during auto-tuning: runs each kernel once
 * (the loop kernel twice, so the split-event timing has a sample) and skips
 * the CPU-side verification entirely.  NOTE(review): it reads back only
 * 16 * scalar_gws bytes of the 32-byte-per-candidate key buffer -- the data
 * is discarded anyway, only the transfer is being timed; confirm this is
 * intentional. */
static int crypt_all_benchmark(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	size_t gws, scalar_gws;

	gws = ((count + (v_width * local_work_size - 1)) / (v_width * local_work_size)) * local_work_size;
	scalar_gws = gws * v_width;

	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, multi_profilingEvent[0]), "failed in clEnqueueWriteBuffer saved_key");
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, multi_profilingEvent[1]), "failed in clEnqueueWriteBuffer saved_len");
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], GenerateSHA1pwhash, 1, NULL, &scalar_gws, &local_work_size, 0, NULL, multi_profilingEvent[2]), "failed in clEnqueueNDRangeKernel");
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel");
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, multi_profilingEvent[3]), "failed in clEnqueueNDRangeKernel");
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], Generate2010key, 1, NULL, &gws, &local_work_size, 0, NULL, multi_profilingEvent[4]), "failed in clEnqueueNDRangeKernel");

	// read back aes key
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_key, CL_TRUE, 0, 16 * scalar_gws, key, 0, NULL, multi_profilingEvent[5]), "failed in reading key back");

	return count;
}
/* All real verification already happened in crypt_all() (the cracked[]
 * array), so the cmp_* methods are trivial lookups. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* Full verification was done in crypt_all(); nothing left to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Reconstruct the plaintext for `index` from the stored UTF-16 buffer
 * (saved_len is in octets, hence the >> 1 for the terminator position). */
static char *get_key(int index)
{
	UTF16 buf[PLAINTEXT_LENGTH + 1];

	memcpy(buf, &saved_key[index * UNICODE_LENGTH], saved_len[index]);
	buf[saved_len[index] >> 1] = 0;
	return (char*)utf16_to_enc(buf);
}
#if FMT_MAIN_VERSION > 11
/* Report the tunable cost (spin count) for a salt.
 * Is spinCount always 100000, or just in our format tests? */
static unsigned int iteration_count(void *salt)
{
	const struct custom_salt *cs = (const struct custom_salt *)salt;

	return (unsigned int) cs->spinCount;
}
#endif
/* Format descriptor registered with the JtR core: static parameters first,
 * then the method table wired to the functions above. */
struct fmt_main fmt_opencl_office2010 = {
	{
		/* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{
			/* names of the tunable cost parameters */
			"iteration count",
		},
#endif
		tests
	}, {
		/* methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary, /* no binary: comparison is done in crypt_all */
		get_salt,
#if FMT_MAIN_VERSION > 11
		{
			iteration_count,
		},
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
convolutiondepthwise_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, one channel per group.
// NOTE(review): despite the "_msa" suffix this is plain scalar code; the
// MSA-vectorized variant presumably lives elsewhere -- confirm.
// Assumes the caller already prepared bottom_blob so that w == outw + 2
// (3x3 window, stride 1, padding handled upstream) -- TODO confirm.
static void convdw3x3s1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // Each channel (group) is convolved independently.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g * 9; // this channel's 3x3 weights

        float* outptr0 = out;            // output row i
        float* outptr1 = outptr0 + outw; // output row i+1 (rows are paired)

        const float* img0 = bottom_blob.channel(g);

        // Four consecutive input rows feed two output rows: rows r1/r2 are
        // shared between them.
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;
        const float* r3 = img0 + w * 3;

        const float* k0 = kernel0;     // kernel row 0
        const float* k1 = kernel0 + 3; // kernel row 1
        const float* k2 = kernel0 + 6; // kernel row 2

        int i = 0;

        // Main loop: two output rows per iteration.
        for (; i + 1 < outh; i += 2)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = bias0;  // accumulator for row i
                float sum2 = bias0; // accumulator for row i+1

                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];

                *outptr0 = sum;
                *outptr1 = sum2;

                // stride 1: window slides one pixel right
                r0++;
                r1++;
                r2++;
                r3++;
                outptr0++;
                outptr1++;
            }

            // Skip the 2-pixel right apron, plus one full extra row since
            // two output rows were produced this iteration.
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;

            // Each output pointer hops over the row its twin just wrote.
            outptr0 += outw;
            outptr1 += outw;
        }

        // Remainder: at most one leftover output row, single-row variant.
        for (; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr0 = sum;

                r0++;
                r1++;
                r2++;
                outptr0++;
            }

            // right apron only (one input row consumed per output row here)
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, one channel per group.
// NOTE(review): scalar code despite the "_msa" suffix -- confirm the
// vectorized path lives elsewhere.
static void convdw3x3s2_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // After an output row, r* pointers sit 2*outw into their input row;
    // tailstep advances them to the start of the next row pair (stride 2
    // skips one full input row).
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // Each channel (group) is convolved independently.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g * 9; // this channel's 3x3 weights

        float* outptr = out;

        const float* img0 = bottom_blob.channel(g);

        // Three consecutive input rows per output row.
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;

        const float* k0 = kernel0;     // kernel row 0
        const float* k1 = kernel0 + 3; // kernel row 1
        const float* k2 = kernel0 + 6; // kernel row 2

        int i = 0;

        for (; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                // stride 2: window slides two pixels right
                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
DistanceField.h | /// \ingroup base
/// \class ttk::DistanceField
/// \author Guillaume Favelier <guillaume.favelier@lip6.fr>
/// \date March 2016
///
/// \brief TTK processing package for distance field computation on PL
/// manifolds.
///
/// This package takes a list of sources (a set of points with their global
/// identifiers attached to them) and produces a distance field to the closest
/// source.
///
/// \b Related \b publication \n
/// "A note on two problems in connexion with graphs" \n
/// Edsger W. Dijkstra \n
/// Numerische Mathematik, 1959.
///
/// \sa ttkDistanceField.cpp %for a usage example.
#ifndef _DISTANCEFIELD_H
#define _DISTANCEFIELD_H
// base code includes
#include<Wrapper.h>
#include<Geometry.h>
#include<Triangulation.h>
// std includes
#include<limits>
#include<set>
namespace ttk{
// Core distance-field computation class: Dijkstra-style propagation from a
// set of source vertices over a PL manifold triangulation.  Inputs and
// outputs are passed as raw, type-erased pointers and interpreted by
// execute<dataType>() (VTK-wrapper calling convention).
class DistanceField : public Debug{

  public:

    DistanceField();
    ~DistanceField();

    // Euclidean distance between two vertices of the triangulation
    // (dataType only affects the return conversion; geometry is float).
    template <typename dataType>
    dataType getDistance(const SimplexId a, const SimplexId b) const;

    // Run the computation; all pointers below must be set beforehand.
    template <typename dataType>
    int execute() const;

    // Total number of vertices in the triangulation.
    inline int setVertexNumber(SimplexId vertexNumber){
      vertexNumber_=vertexNumber;
      return 0;
    }

    // Number of entries in the source-identifier field.
    inline int setSourceNumber(SimplexId sourceNumber){
      sourceNumber_=sourceNumber;
      return 0;
    }

    // Attach the triangulation and precondition the vertex-neighbor
    // queries execute() relies on.
    inline int setupTriangulation(Triangulation* triangulation){
      triangulation_=triangulation;

      if(triangulation_){
        triangulation_->preprocessVertexNeighbors();
      }
      return 0;
    }

    // Input: array of source vertex identifiers (SimplexId[sourceNumber_]).
    inline int setVertexIdentifierScalarFieldPointer(void* data){
      vertexIdentifierScalarFieldPointer_=data;
      return 0;
    }

    // Output: per-vertex distance to the nearest source (dataType[]).
    inline int setOutputScalarFieldPointer(void* data){
      outputScalarFieldPointer_=data;
      return 0;
    }

    // Output: per-vertex identifier of the nearest source (SimplexId[]).
    inline int setOutputIdentifiers(void* data){
      outputIdentifiers_=data;
      return 0;
    }

    // Output: per-vertex index of the nearest source (segmentation).
    inline int setOutputSegmentation(void* data){
      outputSegmentation_=data;
      return 0;
    }

  protected:

    SimplexId vertexNumber_;
    SimplexId sourceNumber_;
    Triangulation* triangulation_;
    void* vertexIdentifierScalarFieldPointer_;
    void* outputScalarFieldPointer_;
    void* outputIdentifiers_;
    void* outputSegmentation_;
};
}
// Euclidean (3D) distance between vertices a and b.
// Coordinates are fetched as float; the float result is converted to
// dataType on return, so the template parameter only controls the
// caller-side type, not the precision of the computation.
template <typename dataType>
dataType ttk::DistanceField::getDistance(const SimplexId a, const SimplexId b) const{
  float p0[3];
  triangulation_->getVertexPoint(a,p0[0],p0[1],p0[2]);

  float p1[3];
  triangulation_->getVertexPoint(b,p1[0],p1[1],p1[2]);

  return Geometry::distance(p0,p1,3);
}
// Compute the distance field: run one Dijkstra propagation per source
// (parallel over sources), then reduce per vertex to the closest source.
// Outputs: dist (distance), origin (closest source id), seg (closest
// source index).  Memory cost is O(#sources * #vertices) for the
// per-source scalar arrays.
template <typename dataType>
int ttk::DistanceField::execute() const{
  SimplexId* identifiers=static_cast<SimplexId*>(vertexIdentifierScalarFieldPointer_);
  dataType* dist=static_cast<dataType*>(outputScalarFieldPointer_);
  SimplexId* origin=static_cast<SimplexId*>(outputIdentifiers_);
  SimplexId* seg=static_cast<SimplexId*>(outputSegmentation_);

  Timer t;

  std::fill(dist,dist+vertexNumber_,std::numeric_limits<dataType>::max());
  std::fill(origin,origin+vertexNumber_,-1);

  // get the sources (the std::set de-duplicates the identifiers)
  std::set<SimplexId> isSource;
  for(SimplexId k=0; k<sourceNumber_; ++k)
    isSource.insert(identifiers[k]);
  std::vector<SimplexId> sources;
  for(auto s : isSource)
    sources.push_back(s);
  isSource.clear();

  // comparison lambda: order by distance, break ties by vertex id so the
  // std::set below behaves as a priority queue with unique entries
  auto cmp=[](const std::pair<dataType,SimplexId>& a, const std::pair<dataType,SimplexId>&
      b){
    if(a.first != b.first) return a.first<b.first;
    else return a.second<b.second;
  };

  // prepare output: one full distance array per source
  std::vector<std::vector<dataType>> scalars(sources.size());
  for(auto& k : scalars)
    k.resize(vertexNumber_, std::numeric_limits<dataType>::max());

  // One independent Dijkstra run per source.  Stale (superseded) queue
  // entries are not removed eagerly; the visited[] check below skips them
  // lazily when they are popped.
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
  for(SimplexId i=0; i< (SimplexId) sources.size(); ++i){
    std::vector<bool> visited(vertexNumber_,false);
    std::set<std::pair<dataType,SimplexId>,decltype(cmp)> S(cmp);

    // seed the queue with the source's direct neighbors
    {
      const SimplexId s=sources[i];
      scalars[i][s]=0;
      visited[s]=true;

      const SimplexId neighborNumber=triangulation_->getVertexNeighborNumber(s);
      for(SimplexId k=0; k<neighborNumber; ++k){
        SimplexId neighbor;
        triangulation_->getVertexNeighbor(s,k,neighbor);
        if(!visited[neighbor]){
          scalars[i][neighbor]=getDistance<dataType>(s,neighbor);
          S.emplace(scalars[i][neighbor],neighbor);
        }
      }
    }

    while(!S.empty()){
      auto it=S.begin(); // smallest tentative distance
      const SimplexId vertex=it->second;

      if(!visited[vertex]){
        const dataType vertexScalar=scalars[i][vertex];
        const SimplexId neighborNumber=triangulation_->getVertexNeighborNumber(vertex);

        // relax all edges out of `vertex`
        for(SimplexId k=0; k<neighborNumber; ++k){
          SimplexId neighbor;
          triangulation_->getVertexNeighbor(vertex,k,neighbor);
          const dataType delta=getDistance<dataType>(vertex,neighbor);

          if(vertexScalar+delta<scalars[i][neighbor]){
            scalars[i][neighbor]=vertexScalar+delta;
            S.emplace(scalars[i][neighbor],neighbor);
          }
        }

        visited[vertex]=true;
      }
      // safe: std::set insertion does not invalidate `it`
      S.erase(it);
    }
  }

  // Per-vertex reduction: keep the minimum over sources (dist was
  // pre-filled with max(), so the i==0 clause just forces initialization).
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
  for(SimplexId k=0; k<vertexNumber_; ++k){
    for(SimplexId i=0; i<(SimplexId)sources.size(); ++i){
      if(i==0 or dist[k]>scalars[i][k]){
        dist[k]=scalars[i][k];
        origin[k]=sources[i];
        seg[k]=i;
      }
    }
  }

  {
    std::stringstream msg;
    msg << "[DistanceField] Data-set (" << vertexNumber_
      << " points) processed in "
      << t.getElapsedTime() << " s. (" << threadNumber_
      << " thread(s))."
      << std::endl;
    dMsg(std::cout, msg.str(), timeMsg);
  }

  return 0;
}
#endif // DISTANCEFIELD_H
|
blast_nalookup.c | /* $Id: blast_nalookup.c 573109 2018-10-23 14:18:55Z boratyng $
* ===========================================================================
*
* PUBLIC DOMAIN NOTICE
* National Center for Biotechnology Information
*
* This software/database is a "United States Government Work" under the
* terms of the United States Copyright Act. It was written as part of
* the author's official duties as a United States Government employee and
* thus cannot be copyrighted. This software/database is freely available
* to the public for use. The National Library of Medicine and the U.S.
* Government have not placed any restriction on its use or reproduction.
*
* Although all reasonable efforts have been taken to ensure the accuracy
* and reliability of the software and data, the NLM and the U.S.
* Government do not and cannot warrant the performance or results that
* may be obtained by using this software or data. The NLM and the U.S.
* Government disclaim all warranties, express or implied, including
* warranties of performance, merchantability or fitness for any particular
* purpose.
*
* Please cite the author in any work or product based on this material.
*
* ===========================================================================
*/
/** @file blast_nalookup.c
* Functions for constructing nucleotide blast lookup tables
*/
#include <algo/blast/core/blast_nalookup.h>
#include <algo/blast/core/lookup_util.h>
#include <algo/blast/core/blast_encoding.h>
#include <algo/blast/core/blast_util.h>
#include <algo/blast/core/blast_filter.h>
/** bitfield used to detect ambiguities in uncompressed
* nucleotide letters
*/
#define BLAST2NA_MASK 0xfc
/** number of bits in a compressed nucleotide letter */
#define BITS_PER_NUC 2
/** Select the lookup table type (small / standard / megablast / hash) and its
 *  width, based on word size and the approximate number of words to index.
 *  The numeric thresholds below are empirically tuned break-even points;
 *  do not change them without benchmarking.
 */
ELookupTableType
BlastChooseNaLookupTable(const LookupTableOptions* lookup_options,
                         Int4 approx_table_entries, Int4 max_q_off,
                         Int4 *lut_width)
{
    ELookupTableType lut_type;

    /* Choose the width of the lookup table. The width may be any number
       <= the word size, but the most efficient width is a compromise
       between small values (which have better cache performance and
       allow a larger scanning stride) and large values (which have fewer
       accesses and allow fewer word extensions) The number of entries
       where one table width becomes better than another is probably
       machine-dependent */

    ASSERT(lookup_options->word_size >= 4);

    /* Discontiguous megablast must always use a megablast table */
    if (lookup_options->mb_template_length > 0) {
        *lut_width = lookup_options->word_size;
        return eMBLookupTable;
    }

    /* always use megablast lookup table and word size 16 for mapping */
    if (Blast_ProgramIsMapping(lookup_options->program_number) &&
        lookup_options->word_size >= 16 && lookup_options->db_filter) {
        *lut_width = 16;
        return eNaHashLookupTable;
    }

    /* For each word size, pick the narrowest width whose expected
       chain lengths (estimated from approx_table_entries) stay cheap. */
    switch(lookup_options->word_size) {
    case 4:
    case 5:
    case 6:
        lut_type = eSmallNaLookupTable;
        *lut_width = lookup_options->word_size;
        break;

    case 7:
        lut_type = eSmallNaLookupTable;
        if (approx_table_entries < 250)
            *lut_width = 6;
        else
            *lut_width = 7;
        break;

    case 8:
        lut_type = eSmallNaLookupTable;
        if (approx_table_entries < 8500)
            *lut_width = 7;
        else
            *lut_width = 8;
        break;

    case 9:
        if (approx_table_entries < 1250) {
            *lut_width = 7;
            lut_type = eSmallNaLookupTable;
        } else if (approx_table_entries < 21000) {
            *lut_width = 8;
            lut_type = eSmallNaLookupTable;
        } else {
            *lut_width = 9;
            lut_type = eMBLookupTable;
        }
        break;

    case 10:
        if (approx_table_entries < 1250) {
            *lut_width = 7;
            lut_type = eSmallNaLookupTable;
        } else if (approx_table_entries < 8500) {
            *lut_width = 8;
            lut_type = eSmallNaLookupTable;
        } else if (approx_table_entries < 18000) {
            *lut_width = 9;
            lut_type = eMBLookupTable;
        } else {
            *lut_width = 10;
            lut_type = eMBLookupTable;
        }
        break;

    case 11:
        if (approx_table_entries < 12000) {
            *lut_width = 8;
            lut_type = eSmallNaLookupTable;
        } else if (approx_table_entries < 180000) {
            *lut_width = 10;
            lut_type = eMBLookupTable;
        } else {
            *lut_width = 11;
            lut_type = eMBLookupTable;
        }
        break;

    case 12:
        if (approx_table_entries < 8500) {
            *lut_width = 8;
            lut_type = eSmallNaLookupTable;
        } else if (approx_table_entries < 18000) {
            *lut_width = 9;
            lut_type = eMBLookupTable;
        } else if (approx_table_entries < 60000) {
            *lut_width = 10;
            lut_type = eMBLookupTable;
        } else if (approx_table_entries < 900000) {
            *lut_width = 11;
            lut_type = eMBLookupTable;
        } else {
            *lut_width = 12;
            lut_type = eMBLookupTable;
        }
        break;

    default:
        /* word sizes > 12 never need a wider table than 12 */
        if (approx_table_entries < 8500) {
            *lut_width = 8;
            lut_type = eSmallNaLookupTable;
        } else if (approx_table_entries < 300000) {
            *lut_width = 11;
            lut_type = eMBLookupTable;
        } else {
            *lut_width = 12;
            lut_type = eMBLookupTable;
        }
        break;
    }

    /* we only use the ordinary blastn table for cases where
       the number of words to index, or the maximum query offset,
       exceeds the range of the 15-bit values used in the
       small blastn lookup table */
    if (lut_type == eSmallNaLookupTable &&
        (approx_table_entries >= 32767 || max_q_off >= 32768)) {
        lut_type = eNaLookupTable;
    }
    return lut_type;
}
/*--------------------- Small nucleotide table ----------------------*/
/** Pack the data structures comprising a small nucleotide lookup table
* into their final form
* @param thin_backbone structure containing indexed query offsets [in][out]
* @param lookup the lookup table [in]
* @param query the query sequence [in][out]
* @return zero if packing process succeeded
*/
static Int4 s_BlastSmallNaLookupFinalize(Int4 **thin_backbone,
                                         BlastSmallNaLookupTable * lookup,
                                         BLAST_SequenceBlk *query)
{
    Int4 i;
    /* overflow array starts at offset 2 because backbone values -0 and -1
       are reserved (-1 means "empty cell", see below) */
    Int4 overflow_cells_needed = 2;
    Int4 overflow_cursor = 2;
    Int4 longest_chain = 0;
#ifdef LOOKUP_VERBOSE
    Int4 backbone_occupancy = 0;
    Int4 thick_backbone_occupancy = 0;
    Int4 num_overflows = 0;
#endif

    /* find out how many cells need the overflow array. The
       backbone holds at most one hit per cell, so any cells
       that need more than that must go into the overflow array
       (along with a trailing null). */

    for (i = 0; i < lookup->backbone_size; i++) {
        if (thin_backbone[i] != NULL) {
            /* thin_backbone[i][1] holds the hit count; offsets start
               at index 2 (layout produced by
               BlastLookupIndexQueryExactMatches) */
            Int4 num_hits = thin_backbone[i][1];
            if (num_hits > 1)
                overflow_cells_needed += num_hits + 1;
            longest_chain = MAX(longest_chain, num_hits);
        }
    }

    /* there is a hard limit to the number of query offsets
       allowed in the overflow array. Although unlikely, it
       is technically possible to index a query sequence that
       has so many trailing nulls in the overflow array that
       the limit gets exceeded */

    if (overflow_cells_needed >= 32768) {
        /* free all chains before bailing out so the caller can
           simply discard the half-built table */
        for (i = 0; i < lookup->backbone_size; i++)
            sfree(thin_backbone[i]);
        return -1;
    }

    /* compute a compressed representation of the query, used
       for computing ungapped extensions */

    BlastCompressBlastnaSequence(query);

    /* allocate the new lookup table */
    lookup->final_backbone = (Int2 *)malloc(
                               lookup->backbone_size * sizeof(Int2));
    ASSERT(lookup->final_backbone != NULL);

    lookup->longest_chain = longest_chain;

    /* allocate the overflow array */
    if (overflow_cells_needed > 0) {
        lookup->overflow = (Int2 *) malloc(overflow_cells_needed *
                                           sizeof(Int2));
        ASSERT(lookup->overflow != NULL);
    }

    /* for each position in the lookup table backbone, */
    for (i = 0; i < lookup->backbone_size; i++) {

        Int4 j, num_hits;

        /* skip if there are no hits in cell i */
        if (thin_backbone[i] == NULL) {
            lookup->final_backbone[i] = -1;
            continue;
        }

#ifdef LOOKUP_VERBOSE
        backbone_occupancy++;
#endif

        num_hits = thin_backbone[i][1];

        if (num_hits == 1) {

            /* if there is only one hit, it goes into the backbone */

#ifdef LOOKUP_VERBOSE
            thick_backbone_occupancy++;
#endif
            lookup->final_backbone[i] = thin_backbone[i][2];
        }
        else {
#ifdef LOOKUP_VERBOSE
            num_overflows++;
#endif
            /* for more than one hit, the backbone stores
               -(overflow offset where hits occur). Since a
               cell value of -1 is reserved to mean 'empty cell',
               the value stored begins at -2 */
            lookup->final_backbone[i] = -overflow_cursor;
            for (j = 0; j < num_hits; j++) {
                lookup->overflow[overflow_cursor++] =
                                   thin_backbone[i][j + 2];
            }

            /* we don't have the room to store the number of hits,
               so append a null to the end of the list to signal
               that the current chain is finished */
            lookup->overflow[overflow_cursor++] = -1;
        }

        /* done with this chain */
        sfree(thin_backbone[i]);
    }

    lookup->overflow_size = overflow_cursor;

#ifdef LOOKUP_VERBOSE
    printf("SmallNa\n");
    printf("backbone size: %d\n", lookup->backbone_size);
    printf("backbone occupancy: %d (%f%%)\n", backbone_occupancy,
           100.0 * backbone_occupancy / lookup->backbone_size);
    printf("thick_backbone occupancy: %d (%f%%)\n",
           thick_backbone_occupancy,
           100.0 * thick_backbone_occupancy / lookup->backbone_size);
    printf("num_overflows: %d\n", num_overflows);
    printf("overflow size: %d\n", overflow_cells_needed);
    printf("longest chain: %d\n", longest_chain);
#endif

    return 0;
}
/** Changes the list of locations into a list of
the intervals between locations (the inverse).
@param locations input list [in]
@param length (query) sequence length [in]
@return inverted BlastSeqLoc
*/
/** Changes the list of locations into a list of
    the intervals between locations (the inverse).
    @param locations input list [in]
    @param length (query) sequence length [in]
    @return inverted BlastSeqLoc
*/
static BlastSeqLoc* s_SeqLocListInvert(const BlastSeqLoc* locations, Int4 length)
{
    BlastSeqLoc* head = NULL;   /* first node of the inverted list */
    BlastSeqLoc* last = NULL;   /* last node appended so far */
    Int4 gap_start = 0;
    Int4 gap_stop;

    ASSERT(locations);

    /* leading gap before the first location; only kept if it spans
       more than two residues (matching the original behavior) */
    gap_stop = MAX( 0, locations->ssr->left - 1);
    if (gap_stop - gap_start > 2)
        last = BlastSeqLocNew(&head, gap_start, gap_stop);

    /* one gap per location: from just past its right edge up to just
       before the next location (or to the end of the sequence) */
    while (locations) {
        gap_start = locations->ssr->right + 1;
        locations = locations->next;
        gap_stop = locations ? locations->ssr->left - 1 : length - 1;
        last = BlastSeqLocNew(head == NULL ? &head : &last,
                              gap_start, gap_stop);
    }

    return head;
}
/** Determine whether mask at hash is enabled from the QuerySetUpOptions */
static Boolean s_HasMaskAtHashEnabled(const QuerySetUpOptions* query_options)
{
if ( !query_options ) {
return FALSE;
}
if (SBlastFilterOptionsMaskAtHash(query_options->filtering_options)) {
return TRUE;
}
if (query_options->filter_string &&
strstr(query_options->filter_string, "m")) {
return TRUE;
}
return FALSE;
}
/** Build a small-word blastn lookup table from the query.
 *  On failure (overflow array limit exceeded) *lut is set to NULL and a
 *  non-zero status is returned.
 */
Int4 BlastSmallNaLookupTableNew(BLAST_SequenceBlk* query,
                                BlastSeqLoc* locations,
                                BlastSmallNaLookupTable * *lut,
                                const LookupTableOptions * opt,
                                const QuerySetUpOptions* query_options,
                                Int4 lut_width)
{
    Int4 status = 0;
    Int4 **thin_backbone;
    BlastSmallNaLookupTable *lookup =
        (BlastSmallNaLookupTable *) calloc(1, sizeof(BlastSmallNaLookupTable));

    ASSERT(lookup != NULL);

    lookup->word_length = opt->word_size;
    lookup->lut_word_length = lut_width;
    /* 4^lut_word_length cells, one per possible lut-width word */
    lookup->backbone_size = 1 << (BITS_PER_NUC * lookup->lut_word_length);
    lookup->mask = lookup->backbone_size - 1;
    lookup->overflow = NULL;
    lookup->scan_step = lookup->word_length - lookup->lut_word_length + 1;

    thin_backbone = (Int4 **) calloc(lookup->backbone_size, sizeof(Int4 *));
    ASSERT(thin_backbone != NULL);

    BlastLookupIndexQueryExactMatches(thin_backbone,
                                      lookup->word_length,
                                      BITS_PER_NUC,
                                      lookup->lut_word_length,
                                      query, locations);
    if (locations &&
        lookup->word_length > lookup->lut_word_length ) {
        /* because we use compressed query, we must always check masked location*/
        lookup->masked_locations = s_SeqLocListInvert(locations, query->length);
    }
    status = s_BlastSmallNaLookupFinalize(thin_backbone, lookup, query);
    if (status != 0) {
        /* destruct returns NULL, so *lut below reports the failure */
        lookup = BlastSmallNaLookupTableDestruct(lookup);
    }
    sfree(thin_backbone);
    *lut = lookup;
    return status;
}
/** Free a small-word blastn lookup table and everything it owns.
 *  @param lookup table to free; NULL is tolerated [in]
 *  @return NULL, for convenient assignment by the caller
 */
BlastSmallNaLookupTable *BlastSmallNaLookupTableDestruct(
                                        BlastSmallNaLookupTable * lookup)
{
    /* guard against NULL so callers may destruct unconditionally */
    if (lookup == NULL)
        return NULL;

    sfree(lookup->final_backbone);
    sfree(lookup->overflow);
    if (lookup->masked_locations)
        lookup->masked_locations = BlastSeqLocFree(lookup->masked_locations);
    sfree(lookup);
    return NULL;
}
/*--------------------- Standard nucleotide table ----------------------*/
/** Pack the data structures comprising a nucleotide lookup table
* into their final form
* @param thin_backbone structure containing indexed query offsets [in][out]
* @param lookup the lookup table [in]
*/
static void s_BlastNaLookupFinalize(Int4 **thin_backbone,
                                    BlastNaLookupTable * lookup)
{
    Int4 i;
    Int4 overflow_cells_needed = 0;
    Int4 overflow_cursor = 0;
    Int4 longest_chain = 0;
    PV_ARRAY_TYPE *pv;
#ifdef LOOKUP_VERBOSE
    Int4 backbone_occupancy = 0;
    Int4 thick_backbone_occupancy = 0;
    Int4 num_overflows = 0;
#endif

    /* allocate the new lookup table */
    lookup->thick_backbone = (NaLookupBackboneCell *)calloc(
                                           lookup->backbone_size,
                                           sizeof(NaLookupBackboneCell));
    ASSERT(lookup->thick_backbone != NULL);

    /* allocate the pv_array (one presence bit per PV_ARRAY_BTS cells) */
    pv = lookup->pv = (PV_ARRAY_TYPE *)calloc(
                              (lookup->backbone_size >> PV_ARRAY_BTS) + 1,
                              sizeof(PV_ARRAY_TYPE));
    ASSERT(pv != NULL);

    /* find out how many cells need the overflow array */
    for (i = 0; i < lookup->backbone_size; i++) {
        if (thin_backbone[i] != NULL) {
            /* thin_backbone[i][1] is the hit count; hits start at index 2 */
            Int4 num_hits = thin_backbone[i][1];
            if (num_hits > NA_HITS_PER_CELL)
                overflow_cells_needed += num_hits;
            longest_chain = MAX(longest_chain, num_hits);
        }
    }
    lookup->longest_chain = longest_chain;

    /* allocate the overflow array */
    if (overflow_cells_needed > 0) {
        lookup->overflow = (Int4 *) calloc(overflow_cells_needed, sizeof(Int4));
        ASSERT(lookup->overflow != NULL);
    }

    /* for each position in the lookup table backbone, */
    for (i = 0; i < lookup->backbone_size; i++) {

        Int4 j, num_hits;

        /* skip if there are no hits in cell i */
        if (thin_backbone[i] == NULL)
            continue;

#ifdef LOOKUP_VERBOSE
        backbone_occupancy++;
#endif
        num_hits = thin_backbone[i][1];
        lookup->thick_backbone[i].num_used = num_hits;

        /* mark this word as present for the scanning phase */
        PV_SET(pv, i, PV_ARRAY_BTS);

        /* if there are few enough hits, copy them into
           the thick_backbone cell; otherwise copy all
           hits to the overflow array */

        if (num_hits <= NA_HITS_PER_CELL) {
#ifdef LOOKUP_VERBOSE
            thick_backbone_occupancy++;
#endif
            for (j = 0; j < num_hits; j++) {
                lookup->thick_backbone[i].payload.entries[j] =
                                         thin_backbone[i][j + 2];
            }
        }
        else {
#ifdef LOOKUP_VERBOSE
            num_overflows++;
#endif
            lookup->thick_backbone[i].payload.overflow_cursor =
                                         overflow_cursor;
            for (j = 0; j < num_hits; j++) {
                lookup->overflow[overflow_cursor] =
                                         thin_backbone[i][j + 2];
                overflow_cursor++;
            }
        }

        /* done with this chain */
        sfree(thin_backbone[i]);
    }

    lookup->overflow_size = overflow_cursor;

#ifdef LOOKUP_VERBOSE
    printf("backbone size: %d\n", lookup->backbone_size);
    printf("backbone occupancy: %d (%f%%)\n", backbone_occupancy,
           100.0 * backbone_occupancy / lookup->backbone_size);
    printf("thick_backbone occupancy: %d (%f%%)\n",
           thick_backbone_occupancy,
           100.0 * thick_backbone_occupancy / lookup->backbone_size);
    printf("num_overflows: %d\n", num_overflows);
    printf("overflow size: %d\n", overflow_cells_needed);
    printf("longest chain: %d\n", longest_chain);
#endif
}
/** Build a standard blastn lookup table from the query.
 *  Unlike the small-table variant this cannot fail after allocation,
 *  so it always returns 0.
 */
Int4 BlastNaLookupTableNew(BLAST_SequenceBlk* query,
                           BlastSeqLoc* locations,
                           BlastNaLookupTable * *lut,
                           const LookupTableOptions * opt,
                           const QuerySetUpOptions* query_options,
                           Int4 lut_width)
{
    Int4 **thin_backbone;
    BlastNaLookupTable *lookup = *lut =
        (BlastNaLookupTable *) calloc(1, sizeof(BlastNaLookupTable));

    ASSERT(lookup != NULL);

    lookup->word_length = opt->word_size;
    lookup->lut_word_length = lut_width;
    /* 4^lut_word_length cells, one per possible lut-width word */
    lookup->backbone_size = 1 << (BITS_PER_NUC * lookup->lut_word_length);
    lookup->mask = lookup->backbone_size - 1;
    lookup->overflow = NULL;
    lookup->scan_step = lookup->word_length - lookup->lut_word_length + 1;

    thin_backbone = (Int4 **) calloc(lookup->backbone_size, sizeof(Int4 *));
    ASSERT(thin_backbone != NULL);

    BlastLookupIndexQueryExactMatches(thin_backbone,
                                      lookup->word_length,
                                      BITS_PER_NUC,
                                      lookup->lut_word_length,
                                      query, locations);
    /* masked locations are only needed when word hits must later be
       extended past the lut word, and only if mask-at-hash is on */
    if (locations &&
        lookup->word_length > lookup->lut_word_length &&
        s_HasMaskAtHashEnabled(query_options)) {
        lookup->masked_locations = s_SeqLocListInvert(locations, query->length);
    }
    s_BlastNaLookupFinalize(thin_backbone, lookup);
    sfree(thin_backbone);
    return 0;
}
/** Free a standard blastn lookup table and everything it owns.
 *  @param lookup table to free; NULL is tolerated [in]
 *  @return NULL, for convenient assignment by the caller
 */
BlastNaLookupTable *BlastNaLookupTableDestruct(BlastNaLookupTable * lookup)
{
    /* guard against NULL so callers may destruct unconditionally */
    if (lookup == NULL)
        return NULL;

    sfree(lookup->thick_backbone);
    sfree(lookup->overflow);
    if (lookup->masked_locations)
        lookup->masked_locations = BlastSeqLocFree(lookup->masked_locations);
    sfree(lookup->pv);
    sfree(lookup);
    return NULL;
}
/*--------------------- Megablast table ---------------------------*/
/** Convert weight, template length and template type from input options into
an MBTemplateType enum
*/
/** Convert weight, template length and template type from input options into
    an MBTemplateType enum
*/
static EDiscTemplateType
s_GetDiscTemplateType(Int4 weight, Uint1 length,
                      EDiscWordType type)
{
    /* coding templates are also used when both templates are requested */
    const Boolean is_coding =
        (type == eMBWordCoding || type == eMBWordTwoTemplates);
    const Boolean is_optimal = (type == eMBWordOptimal);

    if (weight == 11) {
        switch (length) {
        case 16:
            if (is_coding)  return eDiscTemplate_11_16_Coding;
            if (is_optimal) return eDiscTemplate_11_16_Optimal;
            break;
        case 18:
            if (is_coding)  return eDiscTemplate_11_18_Coding;
            if (is_optimal) return eDiscTemplate_11_18_Optimal;
            break;
        case 21:
            if (is_coding)  return eDiscTemplate_11_21_Coding;
            if (is_optimal) return eDiscTemplate_11_21_Optimal;
            break;
        }
    } else if (weight == 12) {
        switch (length) {
        case 16:
            if (is_coding)  return eDiscTemplate_12_16_Coding;
            if (is_optimal) return eDiscTemplate_12_16_Optimal;
            break;
        case 18:
            if (is_coding)  return eDiscTemplate_12_18_Coding;
            if (is_optimal) return eDiscTemplate_12_18_Optimal;
            break;
        case 21:
            if (is_coding)  return eDiscTemplate_12_21_Coding;
            if (is_optimal) return eDiscTemplate_12_21_Optimal;
            break;
        }
    }

    return eDiscTemplateContiguous; /* All unsupported cases default to 0 */
}
/** Fills in the hashtable and next_pos fields of BlastMBLookupTable*
* for the discontiguous case.
*
* @param query the query sequence [in]
* @param location locations on the query to be indexed in table [in]
* @param mb_lt the (already allocated) megablast lookup
* table structure [in|out]
* @param lookup_options specifies the word_size and template options [in]
* @return zero on success, negative number on failure.
*/
static Int2
s_FillDiscMBTable(BLAST_SequenceBlk* query, BlastSeqLoc* location,
                  BlastMBLookupTable* mb_lt,
                  const LookupTableOptions* lookup_options)

{
    BlastSeqLoc* loc;
    EDiscTemplateType template_type;
    EDiscTemplateType second_template_type = eDiscTemplateContiguous;
    const Boolean kTwoTemplates =
        (lookup_options->mb_template_type == eMBWordTwoTemplates);
    PV_ARRAY_TYPE *pv_array=NULL;
    Int4 pv_array_bts;
    Int4 index;
    Int4 template_length;
    /* The calculation of the longest chain can be cpu intensive for
       long queries or sets of queries. So we use a helper_array to
       keep track of this, but compress it by kCompressionFactor so
       it stays in cache. Hence we only end up with a conservative
       (high) estimate for longest_chain, but this does not seem to
       affect the overall performance of the rest of the program. */
    Uint4 longest_chain;
    Uint4* helper_array = NULL;     /* Helps to estimate longest chain. */
    Uint4* helper_array2 = NULL;    /* Helps to estimate longest chain. */
    const Int4 kCompressionFactor=2048; /* compress helper_array by this much */

    ASSERT(mb_lt);
    ASSERT(lookup_options->mb_template_length > 0);

    /* next_pos chains query offsets that hash to the same cell;
       entry 0 is unused because offsets are stored 1-based */
    mb_lt->next_pos = (Int4 *)calloc(query->length + 1, sizeof(Int4));
    helper_array = (Uint4*) calloc(mb_lt->hashsize/kCompressionFactor,
                                   sizeof(Uint4));
    if (mb_lt->next_pos == NULL || helper_array == NULL)
        return -1;

    template_type = s_GetDiscTemplateType(lookup_options->word_size,
                                       lookup_options->mb_template_length,
                                       (EDiscWordType)lookup_options->mb_template_type);

    ASSERT(template_type != eDiscTemplateContiguous);

    mb_lt->template_type = template_type;
    mb_lt->two_templates = kTwoTemplates;
    /* For now leave only one possibility for the second template.
       Note that the intention here is to select both the coding
       and the optimal templates for one combination of word size
       and template length. */
    if (kTwoTemplates) {
        /* Use the temporaray to avoid annoying ICC warning. */
        int temp_int = template_type + 1;
        second_template_type =
             mb_lt->second_template_type = (EDiscTemplateType) temp_int;

        /* a full parallel set of structures for the second template */
        mb_lt->hashtable2 = (Int4*)calloc(mb_lt->hashsize, sizeof(Int4));
        mb_lt->next_pos2 = (Int4*)calloc(query->length + 1, sizeof(Int4));
        helper_array2 = (Uint4*) calloc(mb_lt->hashsize/kCompressionFactor,
                                        sizeof(Uint4));
        if (mb_lt->hashtable2 == NULL ||
            mb_lt->next_pos2 == NULL ||
            helper_array2 == NULL)
            return -1;
    }

    mb_lt->discontiguous = TRUE;
    mb_lt->template_length = lookup_options->mb_template_length;
    template_length = lookup_options->mb_template_length;
    pv_array = mb_lt->pv_array;
    pv_array_bts = mb_lt->pv_array_bts;

    for (loc = location; loc; loc = loc->next) {
        Int4 from;
        Int4 to;
        Uint8 accum = 0;       /* rolling 2-bit-per-base word accumulator */
        Int4 ecode1 = 0;
        Int4 ecode2 = 0;
        Uint1* pos;            /* first position where a full word is ready */
        Uint1* seq;
        Uint1 val;

        /* A word is added to the table after the last base
           in the word is read in. At that point, the start
           offset of the word is (template_length-1) positions
           behind. This index is also incremented, because
           lookup table indices are 1-based (offset 0 is reserved). */
        from = loc->ssr->left - (template_length - 2);
        to = loc->ssr->right - (template_length - 2);
        seq = query->sequence_start + loc->ssr->left;
        pos = seq + template_length;

        for (index = from; index <= to; index++) {
            val = *++seq;
            /* if an ambiguity is encountered, do not add
               any words that would contain it */
            if ((val & BLAST2NA_MASK) != 0) {
                accum = 0;
                pos = seq + template_length;
                continue;
            }

            /* get next base */
            accum = (accum << BITS_PER_NUC) | val;
            /* not enough unambiguous bases accumulated yet */
            if (seq < pos)
                continue;

#ifdef LOOKUP_VERBOSE
            mb_lt->num_words_added++;
#endif
            /* compute the hashtable index for the first template
               and add 'index' at that position */

            ecode1 = ComputeDiscontiguousIndex(accum, template_type);
            if (mb_lt->hashtable[ecode1] == 0) {
#ifdef LOOKUP_VERBOSE
                mb_lt->num_unique_pos_added++;
#endif
                PV_SET(pv_array, ecode1, pv_array_bts);
            }
            else {
                /* cell already occupied: chain grows by one */
                helper_array[ecode1/kCompressionFactor]++;
            }
            /* push 'index' at the head of the chain for ecode1 */
            mb_lt->next_pos[index] = mb_lt->hashtable[ecode1];
            mb_lt->hashtable[ecode1] = index;

            if (!kTwoTemplates)
                continue;

            /* repeat for the second template, if applicable */

            ecode2 = ComputeDiscontiguousIndex(accum, second_template_type);
            if (mb_lt->hashtable2[ecode2] == 0) {
#ifdef LOOKUP_VERBOSE
                mb_lt->num_unique_pos_added++;
#endif
                PV_SET(pv_array, ecode2, pv_array_bts);
            }
            else {
                helper_array2[ecode2/kCompressionFactor]++;
            }
            mb_lt->next_pos2[index] = mb_lt->hashtable2[ecode2];
            mb_lt->hashtable2[ecode2] = index;
        }
    }

    /* compute the conservative longest-chain estimates (2 is the floor
       since compressed buckets cannot distinguish shorter chains) */
    longest_chain = 2;
    for (index = 0; index < mb_lt->hashsize / kCompressionFactor; index++)
        longest_chain = MAX(longest_chain, helper_array[index]);
    mb_lt->longest_chain = longest_chain;
    sfree(helper_array);

    if (kTwoTemplates) {
        longest_chain = 2;
        for (index = 0; index < mb_lt->hashsize / kCompressionFactor; index++)
            longest_chain = MAX(longest_chain, helper_array2[index]);
        mb_lt->longest_chain += longest_chain;
        sfree(helper_array2);
    }
    return 0;
}
/** Set presence-vector bits for every lut-width word of the query that
 *  lies in an unmasked region long enough to yield a full-size word hit.
 *  Only the pv_array is touched; the hashtable itself is not filled here.
 *
 * @param query the query sequence [in]
 * @param location locations on the query to be indexed in table [in]
 * @param mb_lt the (already allocated) megablast lookup table [in|out]
 * @param lookup_options lookup options (unused here) [in]
 * @return zero on success
 */
static Int2
s_FillPV(BLAST_SequenceBlk* query,
         BlastSeqLoc* location,
         BlastMBLookupTable* mb_lt,
         const LookupTableOptions* lookup_options)

{
    BlastSeqLoc* loc;
    /* 12-mers (or perhaps 8-mers) are used to build the lookup table
       and this is what kLutWordLength specifies. */
    const Int4 kLutWordLength = mb_lt->lut_word_length;
    const Int8 kLutMask = mb_lt->hashsize - 1;
    /* The user probably specified a much larger word size (like 28)
       and this is what full_word_size is. */
    Int4 full_word_size = mb_lt->word_length;
    Int4 index;
    PV_ARRAY_TYPE *pv_array;
    Int4 pv_array_bts;

    ASSERT(mb_lt);

    pv_array = mb_lt->pv_array;
    pv_array_bts = mb_lt->pv_array_bts;

    for (loc = location; loc; loc = loc->next) {
        /* We want index to be always pointing to the start of the word.
           Since sequence pointer points to the end of the word, subtract
           word length from the loop boundaries.  */
        Int4 from = loc->ssr->left;
        Int4 to = loc->ssr->right - kLutWordLength;
        Int8 ecode = 0;              /* rolling 2-bit-per-base word code */
        Int4 last_offset;
        Uint1* pos;                  /* first position with a full word */
        Uint1* seq;
        Uint1 val;
       // int counter = 1;          /* collect this many adjacent words */

        /* case of unmasked region >=  kLutWordLength but < full_word_size,
           so no hits should be generated. */
        if (full_word_size > (loc->ssr->right - loc->ssr->left + 1))
            continue;

        seq = query->sequence_start + from;
        pos = seq + kLutWordLength;

        /* Also add 1 to all indices, because lookup table indices count
           from 1. */
        from -= kLutWordLength - 2;
        last_offset = to + 2;

        for (index = from; index <= last_offset; index++) {
            val = *++seq;

            /* if an ambiguity is encountered, do not add
               any words that would contain it */
            if ((val & BLAST2NA_MASK) != 0) {
                ecode = 0;
                pos = seq + kLutWordLength;
                continue;
            }

            /* get next base */
            ecode = ((ecode << BITS_PER_NUC) & kLutMask) + val;
            /* not enough unambiguous bases accumulated yet */
            if (seq < pos)
                continue;

            PV_SET(pv_array, ecode, pv_array_bts);
        }
    }

    return 0;
}
/* Remove words that appear in polyA tails from the lookup table: string of As,
string of Ts, and As and Ts with one error. */
/* Remove words that appear in polyA tails from the lookup table: string of As,
   string of Ts, and As and Ts with one error.
   @param mb_lt megablast lookup table to edit [in|out]
   @return always 0 */
static Int2 s_RemovePolyAWords(BlastMBLookupTable* mb_lt)
{
    Int4 word_size = mb_lt->lut_word_length;
    Int8 word;
    Int4 i, k;

    /* remove As (index 0) and Ts (all 2-bit fields set).  The all-Ts
       index is computed with a 64-bit shift: for word_size == 16 the
       former expression (1 << 32) shifted a 32-bit int past its width,
       which is undefined behavior in C. */
    mb_lt->hashtable[0] = 0;
    mb_lt->hashtable[((Int8)1 << (2 * word_size)) - 1] = 0;

    if (word_size < 16) {
        return 0;
    }

    ASSERT(word_size == 16);

    /* remove As with a single error: one non-A base (i = 1..3) at each
       of the 16 positions */
    for (i = 1; i < 4; i++) {
        word = i;
        for (k = 0; k < word_size; k++) {
            mb_lt->hashtable[word << (k * 2)] = 0;
        }
    }

    /* remove Ts with a single error: clear one 2-bit position of the
       all-Ts word and substitute each non-T base (i = 0..2) there.
       Shifts are performed on 64-bit operands because 3 << 30 would
       overflow a signed 32-bit int (undefined behavior). */
    for (i = 0; i < 3; i++) {
        for (k = 0; k < word_size; k++) {
            word = ((0xffffffffULL ^ ((Int8)3 << (k * 2)))
                    | ((Int8)i << (k * 2))) & 0xffffffffULL;
            mb_lt->hashtable[word] = 0;
        }
    }

    return 0;
}
/** Fills in the hashtable and next_pos fields of BlastMBLookupTable*
* for the contiguous case.
*
* @param query the query sequence [in]
* @param location locations on the query to be indexed in table [in]
* @param mb_lt the (already allocated) megablast lookup table structure [in|out]
* @return zero on success, negative number on failure.
*/
static Int2
s_FillContigMBTable(BLAST_SequenceBlk* query,
                    BlastSeqLoc* location,
                    BlastMBLookupTable* mb_lt,
                    const LookupTableOptions* lookup_options,
                    Uint1* counts)

{
    BlastSeqLoc* loc;
    /* 12-mers (or perhaps 8-mers) are used to build the lookup table
       and this is what kLutWordLength specifies. */
    const Int4 kLutWordLength = mb_lt->lut_word_length;
    const Int8 kLutMask = mb_lt->hashsize - 1;
    /* The user probably specified a much larger word size (like 28)
       and this is what full_word_size is. */
    Int4 full_word_size = mb_lt->word_length;
    Int4 index;
    PV_ARRAY_TYPE *pv_array;
    Int4 pv_array_bts;
    /* The calculation of the longest chain can be cpu intensive for
       long queries or sets of queries. So we use a helper_array to
       keep track of this, but compress it by kCompressionFactor so
       it stays in cache. Hence we only end up with a conservative
       (high) estimate for longest_chain, but this does not seem to
       affect the overall performance of the rest of the program. */
    const Int4 kCompressionFactor=2048; /* compress helper_array by this much */
    Uint4 longest_chain;
    Uint4* helper_array;
    const Boolean kDbFilter = lookup_options->db_filter;

    ASSERT(mb_lt);

    /* next_pos chains query offsets that hash to the same cell;
       entry 0 is unused because offsets are stored 1-based */
    mb_lt->next_pos = (Int4 *)calloc(query->length + 1, sizeof(Int4));
    if (mb_lt->next_pos == NULL)
        return -1;

    pv_array = mb_lt->pv_array;
    pv_array_bts = mb_lt->pv_array_bts;

    helper_array = (Uint4*) calloc(mb_lt->hashsize/kCompressionFactor,
                                   sizeof(Uint4));
    if (helper_array == NULL)
        return -1;

    /* if filtering by database word counts, then reset the pv array to avoid
       too many bits set for database scanning */
    if (kDbFilter) {
        memset(pv_array, 0,
               (mb_lt->hashsize >> mb_lt->pv_array_bts) * PV_ARRAY_BYTES);
    }

    for (loc = location; loc; loc = loc->next) {
        /* We want index to be always pointing to the start of the word.
           Since sequence pointer points to the end of the word, subtract
           word length from the loop boundaries.  */
        Int4 from = loc->ssr->left;
        Int4 to = loc->ssr->right - kLutWordLength;
        Int8 ecode = 0;              /* rolling 2-bit-per-base word code */
        Int4 last_offset;
        Uint1* pos;                  /* first position with a full word */
        Uint1* seq;
        Uint1 val;
        Uint1 max_word_count = lookup_options->max_db_word_count;
       // int counter = 1;          /* collect this many adjacent words */
        int shift = 0;
        int pos_shift = 0;
        /* with a positive stride, only every stride-th word is indexed */
        if (lookup_options->stride > 0) {
            shift = lookup_options->stride - 1;
            pos_shift = kLutWordLength + 1;
        }

        /* case of unmasked region >=  kLutWordLength but < full_word_size,
           so no hits should be generated. */
        if (full_word_size > (loc->ssr->right - loc->ssr->left + 1))
            continue;

        seq = query->sequence_start + from;
        pos = seq + kLutWordLength;

        /* Also add 1 to all indices, because lookup table indices count
           from 1. */
        from -= kLutWordLength - 2;
        last_offset = to + 2;

        for (index = from; index <= last_offset; index++) {
            val = *++seq;

            /* if an ambiguity is encountered, do not add
               any words that would contain it */
            if ((val & BLAST2NA_MASK) != 0) {
                ecode = 0;
                pos = seq + kLutWordLength;
                continue;
            }

            /* get next base */
            ecode = ((ecode << BITS_PER_NUC) & kLutMask) + val;
            /* not enough unambiguous bases accumulated yet */
            if (seq < pos)
                continue;

            /* if filtering by database word count, then do not add words
               with too many counts; counts are packed two 4-bit values
               per byte (even word in the high nibble, odd in the low) */
            if (kDbFilter) {
                if (!(ecode & 1)) {
                    if ((counts[ecode / 2] >> 4) >= max_word_count) {
                        continue;
                    }
                }
                else {
                    if ((counts[ecode / 2] & 0xf) >= max_word_count) {
                        continue;
                    }
                }
            }

            /* collect 1 word and skip lookup_options->stride */
            /*
            if (!counter) {
                pos = seq + lookup_options->stride;
                counter = 1;
                continue;
            }

            if (lookup_options->stride) {
                counter--;
            }
            */

#ifdef LOOKUP_VERBOSE
            mb_lt->num_words_added++;
#endif
            if (mb_lt->hashtable[ecode] == 0) {
#ifdef LOOKUP_VERBOSE
                mb_lt->num_unique_pos_added++;
#endif
                PV_SET(pv_array, ecode, pv_array_bts);
            }
            else {
                /* cell already occupied: chain grows by one */
                helper_array[ecode/kCompressionFactor]++;
            }
            /* push 'index' at the head of the chain for ecode */
            mb_lt->next_pos[index] = mb_lt->hashtable[ecode];
            mb_lt->hashtable[ecode] = index;

            /* skip shift words */
            index += shift;
            seq += shift;
            pos = seq + pos_shift;
        }
    }

    /* for mapping, drop polyA-tail words from the table */
    if (Blast_ProgramIsMapping(lookup_options->program_number)) {
        s_RemovePolyAWords(mb_lt);
    }

    /* conservative longest-chain estimate (floor of 2, since compressed
       buckets cannot distinguish shorter chains) */
    longest_chain = 2;
    for (index = 0; index < mb_lt->hashsize / kCompressionFactor; index++)
        longest_chain = MAX(longest_chain, helper_array[index]);

    mb_lt->longest_chain = longest_chain;
    sfree(helper_array);
    return 0;
}
/** Scan a subject sequecne and update words counters, for 16-base words with
* scan step of 1. The counters are 4-bit and counting is done up to 10.
*
* @param sequence Subject sequence [in]
* @param mb_lt Megablast lookup table [in|out]
* @param counts Word counters [in|out]
*/
static Int2
s_MBCountWordsInSubject_16_1(const BLAST_SequenceBlk* sequence,
                             BlastMBLookupTable* mb_lt,
                             Uint1* counts,
                             Uint1 max_word_count)
{
    Uint1 *s;
    Int4 i;
    Int8 mask = mb_lt->hashsize - 1;
    Int8 word, index, w;
    const Int4 kNumWords
        = sequence->length - mb_lt->lut_word_length;
    PV_ARRAY_TYPE* pv = mb_lt->pv_array;
    Int4 pv_array_bts = mb_lt->pv_array_bts;
    Int4 shift;

    if (!sequence || !counts || !mb_lt || !pv) {
        return -1;
    }
    /* this scanner is hard-wired for 16-base words */
    ASSERT(mb_lt->lut_word_length == 16);

    /* scan the words in the sequence; the subject is compressed
       (COMPRESSION_RATIO bases per byte), so 'w' accumulates raw bytes
       and 'shift' selects the 2-bit offset of the current word's end
       within the most recently loaded byte */
    shift = 8;
    s = sequence->sequence;
    /* prime the accumulator with the first 4 bytes (16 bases) */
    w = (Int8)s[0] << 24 | (Int8)s[1] << 16 | (Int8)s[2] << 8 | s[3];
    for (i = 0;i < kNumWords;i++) {
        if (i % COMPRESSION_RATIO == 0) {
            /* word boundary crosses into the next byte: load it */
            shift = 8;
            w = (w << 8) | (Int8)s[i / COMPRESSION_RATIO + 4];
        }
        else {
            /* advance one base within the current byte */
            shift -= 2;
            ASSERT(shift > 0);
        }
        word = (w >> shift) & mask;

        /* skip words that do not appear in the query */
        if (!PV_TEST(pv, word, pv_array_bts)) {
            continue;
        }

        /* update the counter: two 4-bit saturating counters per byte
           (even word in the high nibble, odd word in the low nibble),
           capped at max_word_count */
        index = word / 2;
        if (word & 1) {
            if ((counts[index] & 0xf) < max_word_count) {
                counts[index]++;
            }
        }
        else {
            if ((counts[index] >> 4) < max_word_count) {
                counts[index] += 1 << 4;
            }
        }
    }

    return 0;
}
/** Scan database sequences and count query words that appear in the database.
* Then reset pv_array bits that correspond to words that do not appear in
* in the database, or appear 10 or more times
*
* @param seq_src Source for subject sequences [in]
* @param mb_lt Megablast lookuptable [in|out]
*/
/** Scan database sequences and count query words that appear in the database.
 * Then reset pv_array bits that correspond to words that do not appear in
 * in the database, or appear 10 or more times
 *
 * @param seq_src Source for subject sequences [in]
 * @param mb_lt Megablast lookuptable [in|out]
 */
static Int2
s_ScanSubjectForWordCounts(BlastSeqSrc* seq_src,
                           BlastMBLookupTable* mb_lt,
                           Uint1* counts,
                           Uint1 max_word_count)
{
    BlastSeqSrcIterator* chunk_itr;
    BlastSeqSrcGetSeqArg seq_arg;
    PV_ARRAY_TYPE* pv = mb_lt->pv_array;

    if (seq_src == NULL || pv == NULL || counts == NULL) {
        return -1;
    }

    memset(&seq_arg, 0, sizeof(seq_arg));
    seq_arg.encoding = eBlastEncodingProtein;

    /* iterate over all subject sequences, updating the word counters
       for each one in turn */
    BlastSeqSrcResetChunkIterator(seq_src);
    chunk_itr = BlastSeqSrcIteratorNewEx(
                       MAX(BlastSeqSrcGetNumSeqs(seq_src)/100, 1));

    for (;;) {
        seq_arg.oid = BlastSeqSrcIteratorNext(seq_src, chunk_itr);
        if (seq_arg.oid == BLAST_SEQSRC_EOF) {
            break;
        }
        BlastSeqSrcGetSequence(seq_src, &seq_arg);
        s_MBCountWordsInSubject_16_1(seq_arg.seq, mb_lt, counts,
                                     max_word_count);
        BlastSeqSrcReleaseSequence(seq_src, &seq_arg);
    }

    BlastSequenceBlkFree(seq_arg.seq);
    BlastSeqSrcIteratorFree(chunk_itr);

    return 0;
}
/* Documentation in mb_lookup.h */
Int2 BlastMBLookupTableNew(BLAST_SequenceBlk* query, BlastSeqLoc* location,
                           BlastMBLookupTable** mb_lt_ptr,
                           const LookupTableOptions* lookup_options,
                           const QuerySetUpOptions* query_options,
                           Int4 approx_table_entries,
                           Int4 lut_width,
                           BlastSeqSrc* seqsrc)
{
    Int4 pv_size;
    Int2 status = 0;
    BlastMBLookupTable* mb_lt;
    const Int4 kTargetPVSize = 131072;    /* PV array size that fits cache */
    const Int4 kSmallQueryCutoff = 15000;
    const Int4 kLargeQueryCutoff = 800000;
    Uint1* counts = NULL; /* array of word counts */
    *mb_lt_ptr = NULL;
    if (!location || !query) {
        /* Empty sequence location provided */
        return -1;
    }
    mb_lt = (BlastMBLookupTable*)calloc(1, sizeof(BlastMBLookupTable));
    if (mb_lt == NULL) {
        return -1;
    }
    ASSERT(lut_width >= 9);
    mb_lt->word_length = lookup_options->word_size;
    /* mb_lt->skip = lookup_options->skip; */
    mb_lt->stride = lookup_options->stride > 0;
    mb_lt->lut_word_length = lut_width;
    mb_lt->hashsize = 1ULL << (BITS_PER_NUC * mb_lt->lut_word_length);
    mb_lt->hashtable = (Int4*)calloc(mb_lt->hashsize, sizeof(Int4));
    if (mb_lt->hashtable == NULL) {
        BlastMBLookupTableDestruct(mb_lt);
        return -1;
    }
    /* remember query regions that are masked at the hash stage */
    if (location &&
        mb_lt->word_length > mb_lt->lut_word_length &&
        s_HasMaskAtHashEnabled(query_options)) {
        mb_lt->masked_locations = s_SeqLocListInvert(location, query->length);
    }
    /* Allocate the PV array. To fit in the external cache of
       latter-day microprocessors, the PV array cannot have one
       bit for every lookup table entry. Instead we choose
       a size that should fit in cache and make a single bit
       of the PV array handle multiple hashtable entries if
       necessary.
       If the query is too small or too large, the compression
       should be higher. Small queries don't reuse the PV array,
       and large queries saturate it. In either case, cache
       is better used on something else */
    if (mb_lt->lut_word_length <= 12) {
        if (mb_lt->hashsize <= 8 * kTargetPVSize)
            pv_size = mb_lt->hashsize >> PV_ARRAY_BTS;
        else
            pv_size = kTargetPVSize / PV_ARRAY_BYTES;
    }
    else {
        /* use 8M-byte pv array for large lut word (only size 16 implemented
           currently) */
        pv_size = kTargetPVSize * 64 / PV_ARRAY_BYTES;
    }
    if (!lookup_options->db_filter &&
        (approx_table_entries <= kSmallQueryCutoff ||
         approx_table_entries >= kLargeQueryCutoff)) {
        pv_size = pv_size / 2;
    }
    mb_lt->pv_array_bts = ilog2(mb_lt->hashsize / pv_size);
    mb_lt->pv_array = calloc(PV_ARRAY_BYTES, pv_size);
    if (mb_lt->pv_array == NULL) {
        BlastMBLookupTableDestruct(mb_lt);
        return -1;
    }
    /* allocate word counters, to save memory we are using 4 bits per word */
    if (lookup_options->db_filter) {
        counts = (Uint1*)calloc(mb_lt->hashsize / 2, sizeof(Uint1));
        if (counts == NULL) {
            BlastMBLookupTableDestruct(mb_lt);
            return -1;
        }
    }
    if (lookup_options->db_filter) {
        s_FillPV(query, location, mb_lt, lookup_options);
        s_ScanSubjectForWordCounts(seqsrc, mb_lt, counts,
                                   lookup_options->max_db_word_count);
    }
    if (lookup_options->mb_template_length > 0) {
        /* discontiguous megablast */
        mb_lt->scan_step = 1;
        status = s_FillDiscMBTable(query, location, mb_lt, lookup_options);
    }
    else {
        /* contiguous megablast */
        mb_lt->scan_step = mb_lt->word_length - mb_lt->lut_word_length + 1;
        status = s_FillContigMBTable(query, location, mb_lt, lookup_options,
                                     counts);
        if (status) {
            /* BUGFIX: this early return used to leak the word counters */
            if (counts) {
                free(counts);
            }
            BlastMBLookupTableDestruct(mb_lt);
            return -1;
        }
    }
    if (lookup_options->db_filter && counts) {
        free(counts);
    }
    if (status > 0) {
        BlastMBLookupTableDestruct(mb_lt);
        return status;
    }
    *mb_lt_ptr = mb_lt;
#ifdef LOOKUP_VERBOSE
    printf("lookup table size: %ld (%d letters)\n", mb_lt->hashsize,
           mb_lt->lut_word_length);
    printf("words in table: %d\n", mb_lt->num_words_added);
    printf("filled entries: %d (%f%%)\n", mb_lt->num_unique_pos_added,
           100.0 * mb_lt->num_unique_pos_added / mb_lt->hashsize);
    printf("PV array size: %d bytes (%ld table entries/bit)\n",
           pv_size * PV_ARRAY_BYTES,
           mb_lt->hashsize / (pv_size << PV_ARRAY_BTS));
    printf("longest chain: %d\n", mb_lt->longest_chain);
#endif
    return 0;
}
/* Free a megablast lookup table and everything it owns; NULL-safe.
   Returns NULL (sfree also nulls each freed pointer, so the final
   'return mb_lt' yields NULL -- presumably relied on by callers). */
BlastMBLookupTable* BlastMBLookupTableDestruct(BlastMBLookupTable* mb_lt)
{
    if (!mb_lt)
        return NULL;
    sfree(mb_lt->hashtable);
    sfree(mb_lt->next_pos);
    sfree(mb_lt->hashtable2);
    sfree(mb_lt->next_pos2);
    sfree(mb_lt->pv_array);
    if (mb_lt->masked_locations)
        mb_lt->masked_locations = BlastSeqLocFree(mb_lt->masked_locations);
    sfree(mb_lt);
    return mb_lt;
}
/* Hash function: Fowler-Noll-Vo (FNV-1) over the first four bytes of seq,
   folded into 'mask' bits.
   http://www.isthe.com/chongo/tech/comp/fnv/index.html */
static Uint4 FNV_hash(Uint1* seq, Uint4 mask)
{
    const Uint4 kFnvPrime = 16777619u;
    Uint4 hash = 2166136261u;   /* FNV offset basis */
    Int4 byte;
    for (byte = 0; byte < 4; byte++) {
        hash = (hash * kFnvPrime) ^ seq[byte];
    }
    return hash & mask;
}
/* Set a presence bit in lookup->pv for every lut_word_length-mer of the
   query that lies inside an unmasked location and contains no ambiguity
   codes. Returns 0. */
static Int2
s_NaHashLookupFillPV(BLAST_SequenceBlk* query,
                     BlastSeqLoc* locations,
                     BlastNaHashLookupTable* lookup)
{
    BlastSeqLoc *loc;
    Int4 word_length;
    Int4 lut_word_length;
    PV_ARRAY_TYPE* pv = NULL;
    const Int4 pv_array_bts = lookup->pv_array_bts;
    ASSERT(lookup);
    word_length = lookup->word_length;
    lut_word_length = lookup->lut_word_length;
    pv = lookup->pv;
    ASSERT(pv);
    for (loc = locations; loc; loc = loc->next) {
        /* We want index to be always pointing to the start of the word.
           Since sequence pointer points to the end of the word, subtract
           word length from the loop boundaries. */
        Int4 from = loc->ssr->left;
        Int4 to = loc->ssr->right;
        Uint4 ecode = 0;   /* rolling 2-bit packed encoding of current word */
        Uint1* pos;        /* first position at which a full word is ready */
        Uint1* seq;
        Uint1* end;
        Uint1 base;
        /* case of unmasked region >= kLutWordLength but < full_word_size,
           so no hits should be generated. */
        if (word_length > (loc->ssr->right - loc->ssr->left + 1)) {
            continue;
        }
        seq = query->sequence + from;
        pos = seq + lut_word_length - 1;
        end = query->sequence + to + 1;
        for (; seq < end; seq++) {
            base = *seq;
            /* if an ambiguity is encountered, do not add
               any words that would contain it */
            if ((base & BLAST2NA_MASK) != 0) {
                ecode = 0;
                /* restart word accumulation past the ambiguous base */
                pos = seq + lut_word_length;
                continue;
            }
            /* get next base */
            ecode = (ecode << BITS_PER_NUC) | base;
            if (seq < pos) {
                /* word not complete yet */
                continue;
            }
            PV_SET(pv, (Int8)ecode, pv_array_bts);
        }
    }
    return 0;
}
/* Get number of set bits in v (SWAR popcount, adapted from
   http://graphics.stanford.edu/~seander/bithacks.html)
   @param v Bit vector [in]
   @return Number of set bits */
static Uint4 s_Popcount(Uint4 v)
{
    if (v == 0) {
        return 0;   /* early bailout for sparse vectors */
    }
    v -= (v >> 1) & 0x55555555;                     /* pairs */
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333); /* nibbles */
    v = (v + (v >> 4)) & 0xF0F0F0F;                 /* bytes */
    return (v * 0x1010101) >> 24;                   /* horizontal sum */
}
/** Sparse array of Uint1 implemented with a bitfield. The implementation
    assumes that indices present are known beforehand and the array is used
    only to access values with certain indices */
typedef struct BlastSparseUint1Array
{
    Uint4* bitfield; /**< bitfield with bits set for present indices;
                          not owned by this structure (it aliases the
                          lookup table's PV array) */
    Uint1* values;   /**< array of values for present indices (owned) */
    Int4* counts;    /**< cumulative number of bits set through each
                          bitfield cell, used for rank lookups (owned) */
    Uint4 num_elements; /**< number of values present in the array */
    Uint4 length;       /**< length of the bitfield, in Uint4 cells */
} BlastSparseUint1Array;
/* Release a sparse array and the buffers it owns ('values' and 'counts');
   the bitfield is not owned and is left alone. NULL-safe, returns NULL. */
static BlastSparseUint1Array*
BlastSparseUint1ArrayFree(BlastSparseUint1Array* array)
{
    if (array) {
        if (array->values) {
            free(array->values);
        }
        if (array->counts) {
            free(array->counts);
        }
        free(array);
    }
    return NULL;
}
/* Create a sparse Uint1 array over an existing presence bitfield of 'len'
   bits. The bitfield is borrowed, not copied. Returns NULL on bad
   arguments or out of memory. */
static BlastSparseUint1Array*
BlastSparseUint1ArrayNew(Uint4* bitfield, Int8 len)
{
    Int4 i;
    Int4 num_elements;
    BlastSparseUint1Array* retval;
    /* BUGFIX: check bitfield before allocating; the original allocated
       first and leaked retval when bitfield was NULL */
    if (!bitfield) {
        return NULL;
    }
    retval = calloc(1, sizeof(BlastSparseUint1Array));
    if (!retval) {
        return NULL;
    }
    retval->bitfield = bitfield;
    retval->length = len >> PV_ARRAY_BTS;
    /* BUGFIX: guard against len smaller than one bitfield cell, which
       would make counts a zero-length array written at index 0 below */
    if (retval->length < 1) {
        BlastSparseUint1ArrayFree(retval);
        return NULL;
    }
    retval->counts = calloc(retval->length, sizeof(Int4));
    if (!retval->counts) {
        BlastSparseUint1ArrayFree(retval);
        return NULL;
    }
    /* cumulative popcounts give the rank of each bitfield cell */
    retval->counts[0] = s_Popcount(retval->bitfield[0]);
    for (i = 1;i < retval->length;i++) {
        retval->counts[i] = retval->counts[i - 1] +
            s_Popcount(retval->bitfield[i]);
    }
    num_elements = retval->counts[retval->length - 1];
    retval->num_elements = num_elements;
    retval->values = calloc(num_elements, sizeof(Uint1));
    if (!retval->values) {
        BlastSparseUint1ArrayFree(retval);
        return NULL;
    }
    return retval;
}
/* Get index into array->values for a given vector index: the rank of the
   index's bit among all set bits. Returns -1 on bad arguments. The bit for
   'index' must be set in the bitfield. */
static Int4
BlastSparseUint1ArrayGetIndex(BlastSparseUint1Array* array, Int8 index)
{
    /* index into bitfield */
    Int4 idx = index >> PV_ARRAY_BTS;
    /* bit number within a bitfield cell (mod 32) */
    Int4 bit_number = index & PV_ARRAY_MASK;
    /* number of bits set before a specified bit */
    Int4 bit_count = 0;
    if (!array || idx >= array->length) {
        return -1;
    }
    /* get number of bits set up to idx */
    bit_count = (idx > 0) ? array->counts[idx - 1] : 0;
    /* BUGFIX: shift an unsigned one -- (1 << 31) on a signed int is
       undefined behavior when bit_number == 31 */
    ASSERT(array->bitfield[idx] & ((Uint4)1 << bit_number));
    /* add number of bits set up to bit number in the cell */
    bit_count += s_Popcount(array->bitfield[idx] &
                            (((Uint4)1 << bit_number) - 1));
    bit_count++;
    ASSERT(bit_count > 0);
    return bit_count - 1;
}
/* Get a pointer to a non zero element in the sparse vector, or NULL if the
   index is invalid or out of range. */
static Uint1*
BlastSparseUint1ArrayGetElement(BlastSparseUint1Array* array, Int8 index)
{
    Int4 sparse_index;
    if (!array) {
        return NULL;
    }
    sparse_index = BlastSparseUint1ArrayGetIndex(array, index);
    ASSERT(sparse_index < array->num_elements);
    /* BUGFIX: was '>' -- sparse_index == num_elements would have read one
       past the end of array->values (the ASSERT above already uses '<') */
    if (sparse_index < 0 || sparse_index >= array->num_elements) {
        return NULL;
    }
    return array->values + sparse_index;
}
/** Scan a subject sequence and update word counters, for 16-base words with
 * scan step of 1. Each counter is one Uint1 slot in a sparse array and
 * counting saturates at max_word_count.
 *
 * @param sequence Subject sequence [in]
 * @param lookup Hashed lookup table [in|out]
 * @param counts Word counters [in|out]
 * @param max_word_count Saturation limit for each counter [in]
 * @return 0 on success, -1 on bad arguments or a too-short sequence
 */
static Int2
s_NaHashLookupCountWordsInSubject_16_1(const BLAST_SequenceBlk* sequence,
                                       BlastNaHashLookupTable* lookup,
                                       BlastSparseUint1Array* counts,
                                       Uint1 max_word_count)
{
    Uint1 *s;
    Int4 i;
    Int8 mask = (1ULL << (16 * BITS_PER_NUC)) - 1;   /* low 32 bits */
    Int8 word, w;
    const Int4 kNumWords
        = sequence->length - lookup->lut_word_length;
    PV_ARRAY_TYPE* pv = lookup->pv;
    Int4 pv_array_bts = lookup->pv_array_bts;
    Int4 shift;
    Uint1* pelem;
    if (!sequence || !counts || !lookup || !pv) {
        return -1;
    }
    ASSERT(lookup->lut_word_length == 16);
    if (sequence->length < lookup->lut_word_length) {
        return -1;
    }
    /* scan the words in the sequence */
    /* NOTE(review): assumes the subject is 2-bit packed, COMPRESSION_RATIO
       (4) bases per byte; 'w' holds the current 5-byte window and 'shift'
       selects the 2-bit phase within the newest byte -- confirm encoding */
    shift = 8;
    s = sequence->sequence;
    w = (Int8)s[0] << 24 | (Int8)s[1] << 16 | (Int8)s[2] << 8 | s[3];
    for (i = 0;i < kNumWords;i++) {
        if (i % COMPRESSION_RATIO == 0) {
            /* word starts on a byte boundary: pull in the next byte */
            shift = 8;
            w = (w << 8) | (Int8)s[i / COMPRESSION_RATIO + 4];
        }
        else {
            /* advance phase by one base (2 bits) within the window */
            shift -= 2;
            ASSERT(shift > 0);
        }
        word = (w >> shift) & mask;
        /* skip words that do not appear in the query */
        if (!PV_TEST(pv, word, pv_array_bts)) {
            continue;
        }
        /* update the counter, saturating at max_word_count */
        pelem = BlastSparseUint1ArrayGetElement(counts, word);
        if (*pelem < max_word_count) {
            (*pelem)++;
        }
    }
    return 0;
}
/* Thread local data for database word counting phase of lookup table
   generation (for Magic-BLAST). Each array below has one entry per thread. */
typedef struct NaHashLookupThreadData
{
    BlastSeqSrcGetSeqArg* seq_arg;  /* per-thread sequence fetch argument */
    BlastSeqSrcIterator** itr;      /* per-thread database iterator */
    BlastSeqSrc** seq_src;          /* per-thread copy of the seq source */
    BlastSparseUint1Array** word_counts; /* per-thread counters; entry 0 is
                                            the deep owner, entries 1.. are
                                            shallow copies with their own
                                            'values' buffer only */
    Int4 num_threads;               /* number of threads / array entries */
} NaHashLookupThreadData;
/* Free thread-local word-counting data. Entries 1..num_threads-1 of
   word_counts are shallow copies that own only their 'values' buffer;
   entry 0 owns 'values' and 'counts' and is released through
   BlastSparseUint1ArrayFree (the shared bitfield aliases lookup->pv and
   is not freed here). NULL-safe; returns NULL. */
static NaHashLookupThreadData* NaHashLookupThreadDataFree(
    NaHashLookupThreadData* th)
{
    if (!th) {
        return NULL;
    }
    if (th->seq_arg) {
        Int4 i;
        for (i = 0;i < th->num_threads;i++) {
            BlastSequenceBlkFree(th->seq_arg[i].seq);
        }
        free(th->seq_arg);
    }
    if (th->itr) {
        Int4 i;
        for (i = 0;i < th->num_threads;i++) {
            BlastSeqSrcIteratorFree(th->itr[i]);
        }
        free(th->itr);
    }
    if (th->seq_src) {
        Int4 i;
        for (i = 0;i < th->num_threads;i++) {
            BlastSeqSrcFree(th->seq_src[i]);
        }
        free(th->seq_src);
    }
    if (th->word_counts) {
        Int4 i;
        /* shallow copies: free only the buffers they own */
        for (i = 1;i < th->num_threads;i++) {
            if (th->word_counts[i]) {
                if (th->word_counts[i]->values) {
                    free(th->word_counts[i]->values);
                }
                free(th->word_counts[i]);
            }
        }
        /* entry 0 is the deep owner */
        BlastSparseUint1ArrayFree(th->word_counts[0]);
        free(th->word_counts);
    }
    free(th);
    return NULL;
}
/* Allocate per-thread state for parallel database word counting.
   Thread 0 gets a fully-owned sparse counter array built over lookup->pv;
   the remaining threads get shallow copies that share the bitfield and
   rank counts but have private 'values' buffers.
   Returns NULL (after freeing partial state) on any failure. */
static NaHashLookupThreadData* NaHashLookupThreadDataNew(Int4 num_threads,
                                          BlastNaHashLookupTable* lookup,
                                          BlastSeqSrc* seq_src)
{
    Int4 i;
    if (num_threads < 1 || !lookup || !seq_src) {
        return NULL;
    }
    NaHashLookupThreadData* retval = calloc(1, sizeof(NaHashLookupThreadData));
    if (!retval) {
        return NULL;
    }
    retval->seq_arg = calloc(num_threads, sizeof(BlastSeqSrcGetSeqArg));
    if (!retval->seq_arg) {
        NaHashLookupThreadDataFree(retval);
        return NULL;
    }
    retval->itr = calloc(num_threads, sizeof(BlastSeqSrcIterator*));
    if (!retval->itr) {
        NaHashLookupThreadDataFree(retval);
        return NULL;
    }
    retval->seq_src = calloc(num_threads, sizeof(BlastSeqSrc*));
    if (!retval->seq_src) {
        NaHashLookupThreadDataFree(retval);
        return NULL;
    }
    retval->word_counts = calloc(num_threads, sizeof(BlastSparseUint1Array*));
    if (!retval->word_counts) {
        NaHashLookupThreadDataFree(retval);
        return NULL;
    }
    for (i = 0;i < num_threads;i++) {
        retval->seq_arg[i].encoding = eBlastEncodingProtein;
        retval->seq_src[i] = BlastSeqSrcCopy(seq_src);
        if (!retval->seq_src[i]) {
            NaHashLookupThreadDataFree(retval);
            return NULL;
        }
        /* each thread must have its own iterator, the small batch seems to
           work better for work balancing between threads */
        retval->itr[i] = BlastSeqSrcIteratorNewEx(1);
        if (!retval->itr[i]) {
            NaHashLookupThreadDataFree(retval);
            return NULL;
        }
        if (i == 0) {
            /* deep counter array: builds rank counts over the PV bitfield */
            retval->word_counts[i] = BlastSparseUint1ArrayNew(lookup->pv,
                                          1LL << (2 * lookup->lut_word_length));
            if (!retval->word_counts[i]) {
                NaHashLookupThreadDataFree(retval);
                return NULL;
            }
        }
        else {
            /* Make shallow copies of the counts array. We do not copy data
               that are read only to save memory. */
            retval->word_counts[i] = malloc(sizeof(BlastSparseUint1Array));
            if (!retval->word_counts[i]) {
                NaHashLookupThreadDataFree(retval);
                return NULL;
            }
            memcpy(retval->word_counts[i], retval->word_counts[0],
                   sizeof(BlastSparseUint1Array));
            /* private, zero-initialized counter values for this thread */
            retval->word_counts[i]->values = calloc(
                                       retval->word_counts[i]->num_elements,
                                       sizeof(Uint1));
            if (!retval->word_counts[i]->values) {
                NaHashLookupThreadDataFree(retval);
                return NULL;
            }
        }
    }
    retval->num_threads = num_threads;
    return retval;
}
/** Scan database sequences and count query words that appear in the database.
 * Then reset pv_array bits that correspond to words that do not appear
 * in the database, or appear max_word_count or more times
 *
 * @param seq_src Source for subject sequences [in]
 * @param lookup Hashed lookuptable [in|out]
 * @param in_num_threads Number of threads to use [in]
 * @param max_word_count Count threshold for dropping a word [in]
 * @return 0 on success, -1 on bad arguments or allocation failure
 */
static Int2
s_NaHashLookupScanSubjectForWordCounts(BlastSeqSrc* seq_src,
                                       BlastNaHashLookupTable* lookup,
                                       Uint4 in_num_threads,
                                       Uint1 max_word_count)
{
    Int8 i;
    Int4 k, b;
    Int4 num_db_seqs, th_batch;
    NaHashLookupThreadData* th_data = NULL;
    Uint4 num_threads;
    if (!seq_src || !lookup || !lookup->pv) {
        return -1;
    }
    ASSERT(lookup->lut_word_length == 16);
    /* pv array must be one bit per word */
    ASSERT(lookup->pv_array_bts == 5);
    num_db_seqs = BlastSeqSrcGetNumSeqs(seq_src);
    /* no more threads than database sequences */
    num_threads = MIN(in_num_threads, num_db_seqs);
    th_batch = BlastSeqSrcGetNumSeqs(seq_src) / num_threads;
    th_data = NaHashLookupThreadDataNew(num_threads, lookup, seq_src);
    if (!th_data) {
        return -1;
    }
    /* reset database iterator */
    BlastSeqSrcResetChunkIterator(seq_src);
    /* scan subject sequences and update the counters for each;
       each thread processes th_batch sequences, fetching under a critical
       section and counting into its private values buffer */
#pragma omp parallel for if (num_threads > 1) num_threads(num_threads) \
    default(none) shared(num_threads, th_data, lookup, \
                         th_batch, max_word_count) private(i) \
    schedule(dynamic, 1)
    for (i = 0;i < num_threads;i++) {
        Int4 j;
        for (j = 0;j < th_batch;j++) {
#pragma omp critical (get_sequence_for_word_counts)
            {
                th_data->seq_arg[i].oid = BlastSeqSrcIteratorNext(
                                                     th_data->seq_src[i],
                                                     th_data->itr[i]);
                if (th_data->seq_arg[i].oid != BLAST_SEQSRC_EOF) {
                    BlastSeqSrcGetSequence(th_data->seq_src[i],
                                           &th_data->seq_arg[i]);
                }
            }
            if (th_data->seq_arg[i].oid != BLAST_SEQSRC_EOF) {
                s_NaHashLookupCountWordsInSubject_16_1(th_data->seq_arg[i].seq,
                                                   lookup,
                                                   th_data->word_counts[i],
                                                   max_word_count);
                BlastSeqSrcReleaseSequence(th_data->seq_src[i],
                                           &th_data->seq_arg[i]);
            }
        }
    }
    /* scan the last sequences (num_db_seqs may not divide evenly) */
    while ((th_data->seq_arg[0].oid = BlastSeqSrcIteratorNext(seq_src,
                                                        th_data->itr[0]))
           != BLAST_SEQSRC_EOF) {
        BlastSeqSrcGetSequence(seq_src, &th_data->seq_arg[0]);
        s_NaHashLookupCountWordsInSubject_16_1(th_data->seq_arg[0].seq, lookup,
                                               th_data->word_counts[0],
                                               max_word_count);
        BlastSeqSrcReleaseSequence(seq_src, &th_data->seq_arg[0]);
    }
    /* aggregate counts into thread 0, saturating at max_word_count */
    for (i = 0;i < th_data->word_counts[0]->num_elements;i++) {
        for (k = 1;k < num_threads;k++) {
            th_data->word_counts[0]->values[i] =
                MIN(th_data->word_counts[0]->values[i] +
                    th_data->word_counts[k]->values[i],
                    max_word_count);
        }
    }
    /* iterate over word counts and clear bits for words that appear too
       often or not at all; i walks bitfield cells, b is the current bit
       mask, k is the rank (index into values) of the current set bit */
    i = 0;
    b = 1;
    k = 0;
    while (i < th_data->word_counts[0]->length) {
        /* skip bit field array elements with all bits cleared */
        if (th_data->word_counts[0]->bitfield[i] == 0) {
            i++;
            b = 1;
            continue;
        }
        if (th_data->word_counts[0]->bitfield[i] & b) {
            ASSERT(k < th_data->word_counts[0]->num_elements);
            /* clear bit if word count is too low or too large */
            if (th_data->word_counts[0]->values[k] == 0 ||
                th_data->word_counts[0]->values[k] >= max_word_count) {
                th_data->word_counts[0]->bitfield[i] &= ~b;
            }
            k++;
        }
        /* NOTE(review): b is a signed Int4; shifting past bit 30 is
           technically undefined -- consider an unsigned mask */
        b <<= 1;
        if (b == 0) {
            i++;
            b = 1;
        }
    }
    NaHashLookupThreadDataFree(th_data);
    return 0;
}
/* Clear PV-array bits for the poly-A word, the poly-T word, and every word
   within one substitution of them, so these low-complexity words never
   enter the lookup table. Returns 0, or -1 if lookup is NULL. */
static Int2 s_NaHashLookupRemovePolyAWords(BlastNaHashLookupTable* lookup)
{
    Int8 word;
    Int4 word_size;
    Int4 i, k;
    PV_ARRAY_TYPE* pv = NULL;
    Int4 pv_array_bts;
    if (!lookup) {
        return -1;
    }
    ASSERT(lookup->lut_word_length == 16);
    /* a bit must represent a single word */
    ASSERT(lookup->pv_array_bts == 5);
    pv = lookup->pv;
    pv_array_bts = lookup->pv_array_bts;
    word_size = lookup->lut_word_length;
    /* remove As (word 0) and Ts (word 0xffffffff) */
    pv[0] &= ~(PV_ARRAY_TYPE)1;
    pv[0xffffffff >> pv_array_bts] &=
        ~((PV_ARRAY_TYPE)1 << (0xffffffff & PV_ARRAY_MASK));
    /* remove As with a single error: base i substituted at position k.
       BUGFIX: the word must depend on k; the original set word = i once
       and cleared the same bit word_size times, leaving all single-error
       poly-A words except A...A[i] in the table */
    for (i = 1;i < 4;i++) {
        for (k = 0;k < word_size;k++) {
            word = (Int8)i << (2 * k);
            pv[word >> pv_array_bts] &=
                ~((PV_ARRAY_TYPE)1 << (word & PV_ARRAY_MASK));
        }
    }
    /* remove Ts with a single error (unsigned shifts: 3 << 30 on a signed
       int would overflow) */
    for (i = 0;i < 3;i++) {
        for (k = 0;k < word_size;k++) {
            word = (Int8)((0xffffffffU ^ ((Uint4)3 << (2 * k))) |
                          ((Uint4)i << (2 * k)));
            pv[word >> pv_array_bts] &=
                ~((PV_ARRAY_TYPE)1 << (word & PV_ARRAY_MASK));
        }
    }
    return 0;
}
/** Pack the data structures comprising a nucleotide lookup table
 * into their final form. Runs two passes over the thin backbone: the first
 * sizes the overflow array, the second moves words/offsets either into the
 * fixed-size thick backbone cells or into the overflow array.
 * @param thin_backbone structure containing indexed query offsets [in][out]
 * @param offsets array of 1-based offset chains referenced by the cells [in]
 * @param lookup the lookup table [in]
 */
static void s_BlastNaHashLookupFinalize(BackboneCell* thin_backbone,
                                        Int4* offsets,
                                        BlastNaHashLookupTable* lookup)
{
    Int4 i;
    Int4 overflow_cells_needed = 0;
    Int4 overflow_cursor = 0;
    Int4 longest_chain = 0;
    PV_ARRAY_TYPE *pv;
    const Int4 pv_array_bts = lookup->pv_array_bts;
    const Int8 kNumWords = 1LL << (2 * lookup->lut_word_length);
#ifdef LOOKUP_VERBOSE
    Int4 backbone_occupancy = 0;
    Int4 thick_backbone_occupancy = 0;
    Int4 num_overflows = 0;
    Int4 words_per_hash[5] = {0,};
#endif
    ASSERT(lookup->lut_word_length == 16);
    if (!lookup->pv) {
        lookup->pv = (PV_ARRAY_TYPE*)calloc(kNumWords >> lookup->pv_array_bts,
                                            sizeof(PV_ARRAY_TYPE));
        ASSERT(lookup->pv);
    }
    else {
        /* reset PV array, it might have been set earlier to count database
           words, and a few bits may need to be reset */
        memset(lookup->pv, 0, (kNumWords >> lookup->pv_array_bts) *
               sizeof(PV_ARRAY_TYPE));
    }
    pv = lookup->pv;
    ASSERT(pv != NULL);
    /* allocate the new lookup table */
    lookup->thick_backbone = (NaHashLookupBackboneCell *)calloc(
                                           lookup->backbone_size,
                                           sizeof(NaHashLookupBackboneCell));
    ASSERT(lookup->thick_backbone != NULL);
    /* pass 1: find out how many cells are needed for the overflow array */
    for (i = 0; i < lookup->backbone_size; i++) {
        BackboneCell* b = &thin_backbone[i];
        Int4 num_hits = 0;
        Int4 num_words = 0;
        /* sum hits and words over the collision chain for this hash value */
        if (b->num_offsets > 0) {
            for (; b; b = b->next) {
                num_hits += b->num_offsets;
                num_words++;
            }
        }
        if (num_words > NA_WORDS_PER_HASH || num_hits > NA_OFFSETS_PER_HASH) {
            /* +1 because we store unhashed word to resolve hash collisions
               +1 for number of offsets */
            overflow_cells_needed += num_hits + (num_words * 2);
        }
        longest_chain = MAX(longest_chain, num_hits);
    }
    lookup->longest_chain = longest_chain;
    /* allocate the overflow array */
    if (overflow_cells_needed > 0) {
        lookup->overflow = (Int4*)calloc(overflow_cells_needed, sizeof(Int4));
        ASSERT(lookup->overflow != NULL);
    }
    /* pass 2: for each position in the lookup table backbone, */
    for (i = 0; i < lookup->backbone_size; i++) {
        Int4 num_words = 0;
        Int4 num_offsets = 0;
        NaHashLookupBackboneCell* cell = lookup->thick_backbone + i;
        BackboneCell* head = &thin_backbone[i];
        BackboneCell* b = NULL;
        Boolean is_overflow = FALSE;
        if (head->num_offsets == 0) {
            continue;
        }
#ifdef LOOKUP_VERBOSE
        thick_backbone_occupancy++;
#endif
        /* for each cell with the same hash value in the thin backbone
           count number of words and offsets stored */
        for (b = head; b; b = b->next) {
            num_words++;
            num_offsets += b->num_offsets;
#ifdef LOOKUP_VERBOSE
            backbone_occupancy++;
#endif
        }
        cell->num_words = num_words;
#ifdef LOOKUP_VERBOSE
        words_per_hash[((num_words < 6) ? num_words : 5) - 1]++;
#endif
        /* if the thin cell stores at most NA_WORDS_PER_HASH words and
           NA_OFFSETS_PER_HASH offsets, store them all in the thick
           backbone */
        if (num_words <= NA_WORDS_PER_HASH &&
            num_offsets <= NA_OFFSETS_PER_HASH) {
            Int4 k = 0;
            Int4 n = 0;
            for (b = head; b; b = b->next, k++) {
                Int4 j;
                cell->words[k] = b->word;
                cell->num_offsets[k] = b->num_offsets;
                PV_SET(pv, (Int8)b->word, pv_array_bts);
                /* walk the 1-based offset chain for this word */
                j = b->offset;
                while (j != 0) {
                    ASSERT(n <= NA_OFFSETS_PER_HASH);
                    /* offsets array stores 1-based offsets */
                    cell->offsets[n++] = j - 1;
                    j = offsets[j];
                }
            }
        }
        /* otherwise, store offsets in the overflow array (words still fit
           in the backbone cell) */
        else if (num_words <= NA_WORDS_PER_HASH) {
            Int4 k = 0;
            for (b = head; b; b = b->next, k++) {
                cell->words[k] = b->word;
            }
            is_overflow = TRUE;
        }
        else {
            is_overflow = TRUE;
        }
        /* add words and offsets to overflow array: word, number of offsets,
           offsets */
        if (is_overflow) {
#ifdef LOOKUP_VERBOSE
            num_overflows++;
#endif
            /* offsets[0] doubles as the start index into the overflow array */
            cell->offsets[0] = overflow_cursor;
            for (b = head; b; b = b->next) {
                Int4 j;
                lookup->overflow[overflow_cursor++] = *(Int4*)(&b->word);
                lookup->overflow[overflow_cursor++] = b->num_offsets;
                j = b->offset;
                while (j != 0) {
                    /* offsets array stores 1-based offsets */
                    lookup->overflow[overflow_cursor++] = j - 1;
                    j = offsets[j];
                }
                ASSERT(overflow_cursor <= overflow_cells_needed);
                PV_SET(pv, (Int8)b->word, pv_array_bts);
            }
        }
        /* done with this chain */
        BackboneCellFree(thin_backbone[i].next);
    }
    lookup->offsets_size = overflow_cursor;
#ifdef LOOKUP_VERBOSE
    printf("backbone size: %d\n", lookup->backbone_size);
    printf("backbone occupancy: %d (%f%%)\n", backbone_occupancy,
           100.0 * backbone_occupancy / lookup->backbone_size);
    printf("thick_backbone occupancy: %d (%f%%)\n",
           thick_backbone_occupancy,
           100.0 * thick_backbone_occupancy / lookup->backbone_size);
    printf("num_overflows: %d\n", num_overflows);
    printf("\tnumber of words per hash\tcount\n");
    {
        Int4 ii;
        for (ii = 0;ii < 5;ii++) {
            printf("\t%d\t%d\n", ii + 1, words_per_hash[ii]);
        }
    }
    printf("overflow size: %d\n", overflow_cells_needed);
    printf("longest chain: %d\n", longest_chain);
#endif
}
/* Free a hashed nucleotide lookup table and everything it owns.
   BUGFIX: guard against a NULL argument, consistent with
   BlastMBLookupTableDestruct; the original dereferenced 'lookup'
   unconditionally. Returns NULL. */
BlastNaHashLookupTable*
BlastNaHashLookupTableDestruct(BlastNaHashLookupTable* lookup)
{
    if (!lookup)
        return NULL;
    sfree(lookup->thick_backbone);
    sfree(lookup->overflow);
    if (lookup->masked_locations)
        lookup->masked_locations = BlastSeqLocFree(lookup->masked_locations);
    if (lookup->pv)
        sfree(lookup->pv);
    sfree(lookup);
    return NULL;
}
/* Build a hashed nucleotide lookup table (16-base words) for the query.
   On success *lut holds the table and 0 is returned; on failure an error
   code is returned (a partially built table may still be reachable via
   *lut for the caller to destruct). */
Int4 BlastNaHashLookupTableNew(BLAST_SequenceBlk* query,
                               BlastSeqLoc* locations,
                               BlastNaHashLookupTable** lut,
                               const LookupTableOptions* opt,
                               const QuerySetUpOptions* query_options,
                               BlastSeqSrc* seqsrc,
                               Uint4 num_threads)
{
    BackboneCell *thin_backbone = NULL;
    Int4* offsets = NULL;
    BlastNaHashLookupTable *lookup = *lut =
        (BlastNaHashLookupTable*) calloc(1, sizeof(BlastNaHashLookupTable));
    /* Number of possible 16-base words */
    const Int8 kNumWords = (1ULL << 32);
    Int4 num_hash_bits = 8;   /* minimum backbone size: 2^8 */
    Int4 i, num_unique_words = 0;
    ASSERT(lookup != NULL);
    if (lookup == NULL) {
        return BLASTERR_MEMORY;
    }
    /* database word filtering needs a sequence source to scan */
    if (opt->db_filter && !seqsrc) {
        return -1;
    }
    lookup->word_length = opt->word_size;
    lookup->lut_word_length = 16;
    lookup->overflow = NULL;
    lookup->hash_callback = FNV_hash;
    if (opt->db_filter) {
        /* with database filtering some query words are not put in the lookup
           table and neighboring query words would be missed with larger scan
           step */
        lookup->scan_step = 1;
    }
    else {
        lookup->scan_step = lookup->word_length - lookup->lut_word_length + 1;
    }
    /* PV array does not use hashing */
    lookup->pv_array_bts = PV_ARRAY_BTS;
    lookup->pv = (PV_ARRAY_TYPE*)calloc(kNumWords >> lookup->pv_array_bts,
                                        sizeof(PV_ARRAY_TYPE));
    if (!lookup->pv) {
        return BLASTERR_MEMORY;
    }
    s_NaHashLookupFillPV(query, locations, lookup);
    s_NaHashLookupRemovePolyAWords(lookup);
    /* count words in the database */
    if (opt->db_filter) {
        s_NaHashLookupScanSubjectForWordCounts(seqsrc, lookup, num_threads,
                                               opt->max_db_word_count);
    }
    /* find number of unique query words */
    for (i = 0;i < kNumWords >> lookup->pv_array_bts; i++) {
        num_unique_words += s_Popcount(lookup->pv[i]);
    }
    /* find number of bits to use for hash function */
    while (num_hash_bits < 32 &&
           (1LL << num_hash_bits) < num_unique_words) {
        num_hash_bits++;
    }
    lookup->backbone_size = 1 << num_hash_bits;
    lookup->mask = lookup->backbone_size - 1;
    thin_backbone = calloc(lookup->backbone_size, sizeof(BackboneCell));
    if (!thin_backbone) {
        return BLASTERR_MEMORY;
    }
    /* it will store 1-based offsets, hence length + 1 */
    offsets = calloc(query->length + 1, sizeof(Int4));
    if (!offsets) {
        /* BUGFIX: the original leaked thin_backbone on this path */
        sfree(thin_backbone);
        return BLASTERR_MEMORY;
    }
    BlastHashLookupIndexQueryExactMatches(thin_backbone,
                                          offsets,
                                          lookup->word_length,
                                          BITS_PER_NUC,
                                          lookup->lut_word_length,
                                          query, locations,
                                          lookup->hash_callback,
                                          lookup->mask,
                                          lookup->pv);
    if (locations &&
        lookup->word_length > lookup->lut_word_length &&
        s_HasMaskAtHashEnabled(query_options)) {
        lookup->masked_locations = s_SeqLocListInvert(locations, query->length);
    }
    s_BlastNaHashLookupFinalize(thin_backbone, offsets, lookup);
    sfree(thin_backbone);
    sfree(offsets);
    return 0;
}
|
GB_unaryop__minv_uint8_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A)  function:  GB_unop__minv_uint8_fp32
// op(A') function:  GB_tran__minv_uint8_fp32
// C type:   uint8_t
// A type:   float
// cast:     uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 8)
// (this file is auto-generated; the macros below instantiate the generic
// unary-op template for A=float, C=uint8_t)
// type of the A matrix entries
#define GB_ATYPE \
    float
// type of the C matrix entries
#define GB_CTYPE \
    uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]
// access the entry of C at position p
#define GB_CX(p) Cx [p]
// unary operator: integer multiplicative inverse in 8 bits
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 8) ;
// casting: float -> uint8_t with saturation/truncation per GB_CAST_UNSIGNED
#define GB_CASTING(z, aij) \
    uint8_t z ; GB_CAST_UNSIGNED(z,aij,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;    \
    GB_OP (GB_CX (pC), z) ;  \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies minv (8-bit unsigned multiplicative inverse) to each of the anz
// entries of Ax (float), casting to uint8_t, in parallel over nthreads.
// Returns GrB_NO_VALUE when the operator is disabled at compile time.
GrB_Info GB_unop__minv_uint8_fp32
(
    uint8_t *Cx,            // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,            // number of entries
    int nthreads            // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent: static partition over the threads
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The whole body is the shared transpose template, specialized via the
// GB_* macros defined above; see GB_unaryop_transpose.c for the algorithm.
GrB_Info GB_tran__minv_uint8_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
utils.h | #pragma once
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#ifdef _WIN32
#include <filesystem>
#else
#include <dirent.h>
#include <sys/types.h>
#endif
namespace PaddleSolution {
namespace utils {
// Join a directory and a file name with the platform's path separator
// ('\\' on Windows, '/' elsewhere). No normalization is performed.
inline std::string path_join(const std::string& dir, const std::string& path) {
#ifdef _WIN32
    const std::string separator = "\\";
#else
    const std::string separator = "/";
#endif
    return dir + separator + path;
}
#ifndef _WIN32
// scan a directory and get all files with input extensions
// (exts is a plain string the file extension must appear in, e.g. ".jpg|.png")
inline std::vector<std::string> get_directory_images(const std::string& path, const std::string& exts)
{
    std::vector<std::string> imgs;
    struct dirent *entry;
    DIR *dir = opendir(path.c_str());
    if (dir == NULL) {
        // BUGFIX: the original called closedir(NULL) here, which is
        // undefined behavior; just report no images
        return imgs;
    }
    while ((entry = readdir(dir)) != NULL) {
        std::string item = entry->d_name;
        auto ext = strrchr(entry->d_name, '.');
        // skip entries without an extension, and "." / ".." (whose last
        // '.' yields the string ".")
        if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
            continue;
        }
        if (exts.find(ext) != std::string::npos) {
            imgs.push_back(path_join(path, entry->d_name));
        }
    }
    // BUGFIX: the original never closed the directory stream, leaking a
    // file descriptor per call
    closedir(dir);
    return imgs;
}
#else
// scan a directory and get all files with input extensions
// (exts is a plain string the file extension must appear in, e.g. ".jpg|.png")
inline std::vector<std::string> get_directory_images(const std::string& path, const std::string& exts)
{
    std::vector<std::string> imgs;
    // BUGFIX: this file includes the C++17 <filesystem> header, so use
    // std::filesystem; std::experimental::filesystem belongs to the TS
    // header and fails to compile against <filesystem>. The unused
    // 'fullname' local is gone as well.
    for (const auto& item : std::filesystem::directory_iterator(path)) {
        const auto suffix = item.path().extension().string();
        if (!suffix.empty() && exts.find(suffix) != std::string::npos) {
            imgs.push_back(item.path().string());
        }
    }
    return imgs;
}
#endif
// Normalize pixel values to [0,1], subtract per-channel mean, divide by
// per-channel std, and repack interleaved HWC data into planar CHW.
// NOTE(review): the original comment claimed "HWC_BGR -> CHW_RGB", but no
// channel reordering happens here -- channel c keeps its input position.
inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean, std::vector<float>& fstd) {
    const int rows = im.rows;
    const int cols = im.cols;
    const int channels = im.channels();
    const double scale = (double)1.0 / 255.0;
    #pragma omp parallel for
    for (int row = 0; row < rows; ++row) {
        const uchar* src = im.ptr<uchar>(row);
        int src_index = 0;
        for (int col = 0; col < cols; ++col) {
            for (int ch = 0; ch < channels; ++ch) {
                // planar destination: channel plane, then row, then column
                const int dst_index = (ch * rows + row) * cols + col;
                float value = static_cast<float>(src[src_index++]);
                value = (value * scale - fmean[ch]) / fstd[ch];
                data[dst_index] = value;
            }
        }
    }
}
// argmax over the channel axis of a CHW score blob: for each of the
// shape[1]*shape[2] pixels, write the best channel index into 'mask' and its
// score (scaled to 0..255) into 'scoremap'; background (label 0) gets score 0.
inline void argmax(float* out, std::vector<int>& shape, std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
    int out_img_len = shape[1] * shape[2];
    int blob_out_len = out_img_len * shape[0];
    /*
    Eigen::TensorMap<Eigen::Tensor<float, 3>> out_3d(out, shape[0], shape[1], shape[2]);
    Eigen::Tensor<Eigen::DenseIndex, 2> argmax = out_3d.argmax(0);
    */
    // BUGFIX: the original used "#pragma omp parallel" around the pixel loop
    // with a shared max_value and an "omp for reduction(max:max_value)" over
    // channels, then read 'label' after the worksharing loop. max_value was
    // racy across threads and the reduced maximum was not paired with its
    // label, so results were wrong with more than one thread. Each pixel is
    // independent, so parallelize over pixels with private locals instead.
    #pragma omp parallel for
    for (int i = 0; i < out_img_len; ++i) {
        float max_value = -1;
        int label = 0;
        for (int j = 0; j < shape[0]; ++j) {
            int index = i + j * out_img_len;
            if (index >= blob_out_len) {
                continue;
            }
            float value = out[index];
            if (value > max_value) {
                max_value = value;
                label = j;
            }
        }
        // background is reported with zero confidence
        if (label == 0) max_value = 0;
        mask[i] = uchar(label);
        scoremap[i] = uchar(max_value * 255);
    }
}
}
}
|
RegionGrowing.h | #ifndef REGIONGROWING_H
#define REGIONGROWING_H
#include <iostream>
#include<vector>
#include <opencv2/opencv.hpp>
#include"Training.h"
#include"GroundTruth.h"
#include <omp.h>
using namespace std;
using namespace cv;
class RegionGrowing{
IplImage *img;
Mat image,gray_image;
vector <Region> regions;
vector<vector<int>> regionsLabels;
vector<int> regionsCounter;
vector<vector<OneRegion>> adjacentRegions;
vector<Stat> trainingData;
vector<float> Priors;
int noOfRegions;
vector<float> colorTDM,colorTDNM,textureTDM,textureTDNM,arrangementTDM,arrangementTDNM; //TDM: Training Data Merged TDNM: Training Data Not Merged
Mat checkMatrix;
float calculateArrangement(int reg1,int reg2){
const int neighborX[4] = {-1, 0, 0, 1};
const int neighborY[4] = { 0, -1, 1, 0};
float count=0;
for(int i=0;i<regions[reg1].getNoOfBoundaryPixels();i++){
bool flag=false;
Coord p=regions[reg1].getBoundaryPixel(i);
for (int k = 0; k < 4; k++) {
int x = p.col + neighborX[k], y = p.row + neighborY[k];
Coord temp(y,x);
if(regions[reg2].findBoundaryPixel(temp)){
count++;
}
}
}
float totalBoundaryPixels=regions[reg1].getNoOfBoundaryPixels()+regions[reg2].getNoOfBoundaryPixels();
return (count/totalBoundaryPixels);
}
void displayContours(char *name){
IplImage *contourImage = cvCloneImage(img);
for (int i = 0; i < regions.size(); i++) {
if(regionsCounter[i]==i){
for (int j = 0; j < regions[i].getNoOfBoundaryPixels(); j++) {
Coord pixel=regions[i].getBoundaryPixel(j);
int row=pixel.row, col=pixel.col;
cvSet2D(contourImage, row,col, CV_RGB(255,0,0));
}
}
}
cvShowImage(name, contourImage);
}
void displayBoundaryOfaRegion(int i,char *name){
IplImage *contourImage = cvCloneImage(img);
for (int j = 0; j < regions[i].getNoOfBoundaryPixels(); j++) {
Coord pixel=regions[i].getBoundaryPixel(j);
int row=pixel.row, col=pixel.col;
cvSet2D(contourImage, row,col, CV_RGB(255,0,0));
}
cvShowImage(name,contourImage);
}
void Merge(int r1,int r2){
Region *reg1=®ions[r1];
Region *reg2=®ions[r2];
int curr=regionsLabels[reg1->getAt(0).col][reg1->getAt(0).row];
for(int i=0;i<reg2->getNoOfPixels();i++){
Coord pixel=reg2->getAt(i);
reg1->addPixel(pixel);
regionsLabels[pixel.col][pixel.row]=curr;
}
for(int i=0;i<reg2->getNoOfBoundaryPixels();i++)
reg1->addBoundaryPixel(reg2->getBoundaryPixel(i));
reg1->adjustBoundaryPixels(regionsLabels);
for(int i=0;i<regionsCounter.size();i++){
if(regionsCounter[i]==r2)
regionsCounter[i]=r1;
}
noOfRegions--;
}
int Probability(int reg) {
float maxProb =-1;
int actualRegion=regionsCounter[reg];
Region *current=®ions[actualRegion];
//define bandwidth of kernels
float hColor = 3.4078, hColorNOT = 4.4450;
float hText = 39.8589, hTextNOT = 30.7168;
float hArr= 0.0104, hArrNOT = 0.0047;
int maxProbRegion=0;
for(int i=0;i<adjacentRegions[reg].size();i++){
int adjacentReg=adjacentRegions[reg][i].region;
adjacentReg=regionsCounter[adjacentReg];
if(actualRegion!=adjacentReg){
Region *adjacent= ®ions[adjacentReg];
float Diffcolor=sqrt(pow((current->calculateGLColorMeanValue(0)- adjacent->calculateGLColorMeanValue(0)),2)+
pow((current->calculateGLColorMeanValue(1)- adjacent->calculateGLColorMeanValue(1)),2)+
pow((current->calculateGLColorMeanValue(2)- adjacent->calculateGLColorMeanValue(2)),2));
float DiffTexture=abs(current->calculateGLTexture()-adjacent->calculateGLTexture());
float Arrangement=adjacentRegions[reg][i].arrangement;
float result=0; //PmergeNOT,, Ptexture, Parrangement;
vector<float> LikeColor, LikeTexture, LikeArrangement, LikeEntroy;
#pragma omp parallel sections
{
#pragma omp section
{
LikeColor = likelihoods(Diffcolor, 0, hColor, hColorNOT);
}
#pragma omp section
{
LikeTexture = likelihoods(DiffTexture, 1, hText, hTextNOT);
}
#pragma omp section
{
LikeArrangement = likelihoods(Arrangement, 2, hArr, hArrNOT);
}
}
result = (LikeColor[0]*LikeTexture[0]*LikeArrangement[0]*Priors[0])/
((LikeColor[0]*LikeTexture[0]*LikeArrangement[0]*Priors[0])+(LikeColor[1]*LikeTexture[1]*LikeArrangement[1]*Priors[1]));
if (result > 1){
cout << "Prob higher than 1: " << result << endl;
}
if(result>maxProb){
maxProb=result;
maxProbRegion=adjacentReg;
}
}
}
// define merging threshold
if(maxProb<0.5){
maxProbRegion=-1;
}
return maxProbRegion;
}
//compute prior probabilites
vector<float> PriorProbs(){
float SUMmerged=0;
float PriorMerged, PriorMergedNot; //Priors;
vector<float> output(2); //1 PriorMerged, 2 PriorNotmerged
for(int i=0; i < trainingData.size(); i++) {
if(trainingData[i].isMerged){
SUMmerged += 1;
}
}
PriorMerged = SUMmerged / trainingData.size();
PriorMergedNot = 1 - PriorMerged;
output[0] = PriorMerged;
output[1] = PriorMergedNot;
return output;
}
//compute likelihood for a feature vector
vector<float> likelihoods(float input, const int col, const float hmerged, const float hNotmerged){
float likePnotMerged=0, likePmerged=0; //likelihoods
vector<float> trainDataMerged, trainDataNOTMerged; //to copy data from trainingData
double pi = 3.1415926535897; //define pi for Gaussian KDE
vector<float> output(2); //vector to return likelihoods
if(col==0){
trainDataMerged=colorTDM;
trainDataNOTMerged=colorTDNM;
}
else if(col==1){
trainDataMerged=textureTDM;
trainDataNOTMerged=textureTDNM;
}
else{
trainDataMerged=arrangementTDM;
trainDataNOTMerged=arrangementTDNM;
}
#pragma omp parallel sections
{
#pragma omp section
{
for (int i=0; i < trainDataMerged.size(); i++){
likePmerged += (1/(sqrt((2*pi*pow(hmerged,2)))))*exp(-((pow(abs(input - trainDataMerged[i]),2))/(2*hmerged*hmerged)));
}
likePmerged /= trainDataMerged.size();
}
#pragma omp section
{
//calculate probability density of likelihood P(x|mergeNOT)
for (int i=0; i < trainDataNOTMerged.size(); i++){
likePnotMerged += (1/(sqrt((2*pi*pow(hNotmerged,2)))))*exp(-((pow(abs(input - trainDataNOTMerged[i]),2))/(2*hNotmerged*hNotmerged)));
}
likePnotMerged /= trainDataNOTMerged.size();
}
}
//hand over values of the likelihood values to output vector
output[0] = likePmerged;
output[1] = likePnotMerged;
return output;
}
void calculateLikelihoodDataVectors(){
//For Color
for(int i=0; i < trainingData.size(); i++){
if(trainingData[i].isMerged){
colorTDM.push_back( trainingData[i].values[0]);
}
else{
colorTDNM.push_back(trainingData[i].values[0]);
}
}
//For Texture
for(int i=0; i < trainingData.size(); i++){
if(trainingData[i].isMerged){
textureTDM.push_back( trainingData[i].values[1]);
}
else{
textureTDNM.push_back(trainingData[i].values[1]);
}
}
//For Arrangement
for(int i=0; i < trainingData.size(); i++){
if(trainingData[i].isMerged){
arrangementTDM.push_back( trainingData[i].values[2]);
}
else{
arrangementTDNM.push_back(trainingData[i].values[2]);
}
}
}
void RunSLIC(const char* imagePath,int noOfSuperpixels,int nc){
cout<<"\nRunning SLIC...\t\t\t\t";
image=imread(imagePath,CV_LOAD_IMAGE_UNCHANGED);
cvtColor( image, gray_image, COLOR_BGR2GRAY );
img = cvLoadImage(imagePath, 1);
IplImage *lab_image = cvCloneImage(img);
cvCvtColor(img, lab_image, CV_BGR2Lab);
/* Yield the number of superpixels and weight-factors from the user. */
int w = img->width, h = img->height;
double step = sqrt((w * h) / (double) noOfSuperpixels);
/* Perform the SLIC superpixel algorithm. */
Slic slic;
slic.generate_superpixels(lab_image, step, nc);
slic.create_connectivity(lab_image);
for(int i=0;i<noOfSuperpixels+1000;i++){
//Region newRegion(&image);
Region newRegion(&image,&gray_image);
regions.push_back(newRegion);
}
slic.getResults(img,regions,regionsLabels);
noOfRegions=regions.size();
for(int i=0;i<regions.size();i++){
regionsCounter.push_back(i);
}
}
void FindAdjacentRegions(string adjacentRegionsFile){
cout<<"\nFinding Adjacent Regions...\t\t";
vector<vector<bool>> adjacentRegionsCheck;
for(int i=0;i<regions.size();i++){
vector<bool> temp;
for(int j=0;j<regions.size();j++){
temp.push_back(false);
}
adjacentRegionsCheck.push_back(temp);
}
for(int i=0;i<regions.size();i++){
vector<OneRegion> temp;
adjacentRegions.push_back(temp);
}
double x=regions.size();
double x2=-4.96281576*pow(10,-5)*pow(x,2);
double x1=1.170450583*0.1*x;
int y=x2+x1 + 10.32622578;
y=y+10;
ofstream fout(adjacentRegionsFile);
for(int i=0;i<regions.size();i++){
int j=i-y,k=i+y;
if(j<0)
j=0;
if(k>regions.size())
k=regions.size();
for(;j<k;j++){
if(i!=j){
float arg=calculateArrangement(i,j);
if(arg>0){
adjacentRegions[i].push_back(OneRegion(j,arg));
fout<<i<<" "<<j<<" "<<arg<<endl;
adjacentRegionsCheck[i][j]=true;
adjacentRegionsCheck[j][i]=true;
}
}
}
}
}
void ReadAdjacentRegionsFromFile(string adjacentRegionsFile){
cout<<"\nReading Adjacent Regions From File... ";
for(int i=0;i<regions.size();i++){
vector<OneRegion> temp;
adjacentRegions.push_back(temp);
}
ifstream fin(adjacentRegionsFile);
int r1,r2;
float a;
while(!fin.eof()){
fin>>r1;
fin>>r2;
fin>>a;
adjacentRegions[r1].push_back(OneRegion(r2,a));
}
}
void Perform(){
cout<<"\nPerforming...\t\t\t\t";
for(int a=1;noOfRegions>1;a++){
bool flag=false;
for(int i=0;i<adjacentRegions.size();i++){
if(adjacentRegions[i].size()!=0){
int mergeTo=Probability(i);
if(mergeTo!=-1){
int actualRegion=regionsCounter[i];
Merge(actualRegion,mergeTo);
flag=true;
}
}
}
if(!flag)
break;
}
}
void SaveResult(const char *resultImagePath){
IplImage* resultImg=cvCloneImage(img);
for (int i = 0; i < regions.size(); i++) {
if(regionsCounter[i]==i){
for (int j = 0; j < regions[i].getNoOfBoundaryPixels(); j++) {
Coord pixel=regions[i].getBoundaryPixel(j);
int row=pixel.row, col=pixel.col;
cvSet2D(resultImg, row,col, CV_RGB(255,0,0));
}
}
}
cvSaveImage(resultImagePath,resultImg);
}
void SaveBoundaryMapImage(const char *boundaryMapImagePath){
IplImage* boundaryMap = cvCreateImage(cvSize(image.size().width,image.size().height), 8, 1);
cvZero(boundaryMap);
int count=0;
for (int i = 0; i < regions.size(); i++) {
if(regionsCounter[i]==i){
count++;
for (int j = 0; j < regions[i].getNoOfBoundaryPixels(); j++) {
Coord pixel=regions[i].getBoundaryPixel(j);
int row=pixel.row, col=pixel.col;
cvSet2D(boundaryMap, row,col, 255);
}
}
}
cvSaveImage(boundaryMapImagePath,boundaryMap);
}
void ClearData(){
regions.clear();
regionsLabels.clear();
regionsCounter.clear();
adjacentRegions.clear();
trainingData.clear();
}
public:
RegionGrowing(){
checkMatrix = Mat::zeros(image.size(), CV_8UC1);
}
void Start(string name,const char* imagePath,int noOfSuperpixels,int nc,string adjacentRegionsFile,bool isAdjacentFile,string trainingFile,const char* resultImagePath,const char *boundaryMapImagePath){
cout<<"\t\t\t * Testing "<<name<<" *\n";
Timer totalTime,localTime;
totalTime.Start();
localTime.Start();
RunSLIC(imagePath,noOfSuperpixels,nc);
localTime.LocalEnd();
cout<<" Superpixels: "<<regions.size();
localTime.Start();
if(isAdjacentFile)
ReadAdjacentRegionsFromFile(adjacentRegionsFile);
else
FindAdjacentRegions(adjacentRegionsFile);
localTime.LocalEnd();
Training train;
trainingData=train.GetTrainingData(trainingFile);
cout<<"\nTraining Size: "<<trainingData.size();
Priors=PriorProbs();
//PriorProbs(Priors);
calculateLikelihoodDataVectors();
localTime.Start();
Perform();
localTime.LocalEnd();
SaveResult(resultImagePath);
SaveBoundaryMapImage(boundaryMapImagePath);
ClearData();
totalTime.TotalEnd();
}
};
#endif |
edge_data.h | /*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: antonia $
// Date: $Date: 2009-01-14 08:26:51 $
// Revision: $Revision: 1.11 $
//
//
#if !defined(KRATOS_EDGE_DATA_H_INCLUDED )
#define KRATOS_EDGE_DATA_H_INCLUDED
//we suggest defining the following macro
#define USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
//we suggest defining the following macro
#define USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "incompressible_fluid_application.h"
#include "utilities/openmp_utils.h"
namespace Kratos
{
// template<unsigned int TDim>
// class EdgeConstructionScratch
// {
// public:
// array_1d<double, TDim+1> N;
// boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim> dN_dx;
// double volume;
// double weighting_factor = 1.0 / static_cast<double>(TDim+1);
// boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> mass_consistent;
// array_1d<double, TDim+1> mass_lumped;
// array_1d<unsigned int, TDim+1> nodal_indices;
// array_1d<double, TDim+1> heights;
//
// }
//structure definition for fast access to edge data using CSR format
//structure holding, for one directed edge ij of the mesh graph, the
//pre-integrated elemental quantities needed by edge-based (CSR) solvers.
//All Add_*/Sub_* members accumulate into a caller-provided destination;
//Sub_* is always the exact negation of the matching Add_*.
template<unsigned int TDim>
class EdgesStructureType
{
public:
    //component ij of the consistent mass matrix (M = Ni * Nj * dOmega)
    double Mass;
    //components kl of the laplacian matrix of edge ij (L = dNi/dxk * dNj/dxl * dOmega)
    //double Laplacian;
    boost::numeric::ublas::bounded_matrix<double, TDim, TDim> LaplacianIJ;
    //components k of the gradient matrix of edge ij (G = Ni * dNj/dxl * dOmega)
    array_1d<double, TDim> Ni_DNj;
    //components k of the transposed gradient matrix of edge ij (GT = dNi/dxl * Nj * dOmega)
    //TRANSPOSED GRADIENT
    array_1d<double, TDim> DNi_Nj;

    //*************************************************************************************
    //*************************************************************************************
    //gradient integrated by parts
    //RHSi += DNi_Nj pj + Aboundary * pext ==> RHS += Ni_DNj p_j - DNi_Nj p_i
    //ATTENTION: + Aboundary * pext is NOT included!! it should be included "manually"
    //NOTE the sign convention: Add_Gp SUBTRACTS (Ni_DNj*p_j - DNi_Nj*p_i) from
    //destination, and Sub_Gp adds it back — they are intentionally mirrored.
    inline void Add_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
    }

    inline void Sub_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] += Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
    }

    //*************************************************************************************
    //*************************************************************************************
    //divergence-like operator built from the gradient components:
    //accumulates Ni_DNj[k]*(v_j[k] - v_i[k]) into a scalar destination
    //RHSi += Ni_DNj[k]*v[k]
    inline void Add_D_v(double& destination,
                        const array_1d<double, TDim>& v_i,
                        const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination += Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
    }

    inline void Sub_D_v(double& destination,
                        const array_1d<double, TDim>& v_i,
                        const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination -= Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
    }

    //*************************************************************************************
    //*************************************************************************************
    //gradient of a scalar (difference form): destination[k] += Ni_DNj[k]*(p_j - p_i)
    //RHSi += Ni_DNj pj
    inline void Add_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] += Ni_DNj[comp] * (p_j - p_i);
    }

    inline void Sub_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] -= Ni_DNj[comp] * (p_j - p_i);
    }

    //*************************************************************************************
    //*************************************************************************************
    //divergence integrated by parts (mixes Ni_DNj on v_j with DNi_Nj on v_i;
    //note Add_div_v subtracts the expression, mirroring the Add_Gp convention)
    //RHSi += DNi_Nj[k]*v[k]
    inline void Add_div_v(double& destination,
                          const array_1d<double, TDim>& v_i,
                          const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination -= Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
    }

    inline void Sub_div_v(double& destination,
                          const array_1d<double, TDim>& v_i,
                          const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination += Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
    }

    //*************************************************************************************
    //*************************************************************************************
    //gets the trace of the laplacian matrix (scalar laplacian of the edge)
    inline void CalculateScalarLaplacian(double& l_ij)
    {
        l_ij = LaplacianIJ(0, 0);
        for (unsigned int comp = 1; comp < TDim; comp++)
            l_ij += LaplacianIJ(comp, comp);
    }

    //convective term for a vector unknown U, convection velocity a.
    //Conservative form (macro enabled): (a_i . G_ij) * (U_j - U_i);
    //non-conservative form: (a_j.G_ij)*U_j - (a_i.G_ij)*U_i.
    inline void Add_ConvectiveContribution(array_1d<double, TDim>& destination,
                                           const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
                                           const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += temp * (U_j[l_comp] - U_i[l_comp]);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
#endif
    }

    inline void Sub_ConvectiveContribution(array_1d<double, TDim>& destination,
                                           const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
                                           const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= temp * (U_j[l_comp] - U_i[l_comp]);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
#endif
    }

    //convective term for a scalar unknown phi (same two forms as above,
    //selected by USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION)
    inline void Sub_ConvectiveContribution(double& destination,
                                           const array_1d<double, TDim>& a_i, const double& phi_i,
                                           const array_1d<double, TDim>& a_j, const double& phi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        destination -= temp * (phi_j - phi_i);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        destination -= aux_j * phi_j - aux_i * phi_i;
#endif
    }

    inline void Add_ConvectiveContribution(double& destination,
                                           const array_1d<double, TDim>& a_i, const double& phi_i,
                                           const array_1d<double, TDim>& a_j, const double& phi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        destination += temp * (phi_j - phi_i);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        destination += aux_j * phi_j - aux_i * phi_i;
#endif
    }

    //*************************************************************************************
    //*************************************************************************************
    //low-order streamline stabilization for a vector unknown:
    //stab_low = (a_i^T L_ij a_i) * (U_j - U_i), using the full edge laplacian
    inline void CalculateConvectionStabilization_LOW(array_1d<double, TDim>& stab_low,
                                                     const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
                                                     const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
    {
        double conv_stab = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
                conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]);

        //             double temp = 0.0;
        //             double lij = 0.0;
        //             for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        //             {
        //                 lij += LaplacianIJ(k_comp,k_comp);
        //                 temp = a_i[k_comp] * a_i[k_comp];
        //             }
        //
        //             for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //                 stab_low[l_comp] = temp * lij * (U_j[l_comp] - U_i[l_comp]);
    }

    //        inline void CalculateConvectionStabilization_LOW( array_1d<double,TDim>& stab_low,
    //                const array_1d<double,TDim>& a_i, const array_1d<double,TDim>& U_i, const double& p_i,
    //                const array_1d<double,TDim>& a_j, const array_1d<double,TDim>& U_j, const double& p_j
    //                                                        )
    //        {
    //            double conv_stab = 0.0;
    //            for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
    //                for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
    //                    conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp,m_comp);
    //            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
    //                stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]);
    //
    ////             adding pressure
    //            double press_diff = p_j-p_i;
    //            for (unsigned int j_comp = 0; j_comp < TDim; j_comp++)
    //            {
    //                for (unsigned int i_comp = 0; i_comp < TDim; i_comp++)
    //                    stab_low[j_comp] -= a_i[i_comp] * LaplacianIJ(i_comp,j_comp) * press_diff ;
    //            }
    //
    //
    //        }

    //low-order stabilization, scalar unknown: (a_i^T L_ij a_i) * (phi_j - phi_i)
    inline void CalculateConvectionStabilization_LOW(double& stab_low,
                                                     const array_1d<double, TDim>& a_i, const double& phi_i,
                                                     const array_1d<double, TDim>& a_j, const double& phi_j)
    {
        double conv_stab = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
                conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
        stab_low = conv_stab * (phi_j - phi_i);
    }

    //*************************************************************************************
    //*************************************************************************************
    //high-order stabilization term built from the projected residual pi
    //(vector unknown); form selected by the vector-convection macro
    inline void CalculateConvectionStabilization_HIGH(array_1d<double, TDim>& stab_high,
                                                      const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& pi_i,
                                                      const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& pi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
        double temp = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            stab_high[l_comp] = -temp * (pi_j[l_comp] - pi_i[l_comp]); //check if the minus sign is correct

        //            double temp_i = 0.0;
        //            double temp_j = 0.0;
        //            for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        //            {
        //                temp_j += a_i[k_comp] * Ni_DNj[k_comp];
        //                temp_i += a_i[k_comp] * DNi_Nj[k_comp];
        //            }
        //            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //                stab_high[l_comp] = +(temp_j*pi_j[l_comp] - temp_i*pi_i[l_comp]); //check if the minus sign is correct

        //            double temp_i = 0.0;
        //            double temp_j = 0.0;
        //            for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        //            {
        //                temp_i += a_i[k_comp] * Ni_DNj[k_comp];
        //                temp_j += a_i[k_comp] * DNi_Nj[k_comp];
        //            }
        //            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //                stab_high[l_comp] = (temp_j*pi_j[l_comp] + temp_i*pi_i[l_comp]); //check if the minus sign is correct
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            stab_high[l_comp] = -(aux_j * pi_j[l_comp] - aux_i * pi_i[l_comp]);
#endif
    }

    //high-order stabilization term, scalar unknown
    inline void CalculateConvectionStabilization_HIGH(double& stab_high,
                                                      const array_1d<double, TDim>& a_i, const double& pi_i,
                                                      const array_1d<double, TDim>& a_j, const double& pi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
        double temp = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        stab_high = -temp * (pi_j - pi_i); //check if the minus sign is correct
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        stab_high = -(aux_j * pi_j - aux_i * pi_i);
#endif
    }

    //*************************************************************************************
    //*************************************************************************************
    //combine low/high order stabilization: destination +/- = tau*(stab_low - beta*stab_high)
    inline void Add_StabContribution(array_1d<double, TDim>& destination,
                                     const double tau, const double beta,
                                     const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
    {
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
    }

    inline void Add_StabContribution(double& destination,
                                     const double tau, const double beta,
                                     const double& stab_low, const double& stab_high)
    {
        destination += tau * (stab_low - beta * stab_high);
    }

    inline void Sub_StabContribution(array_1d<double, TDim>& destination,
                                     const double tau, const double beta,
                                     const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
    {
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
    }

    inline void Sub_StabContribution(double& destination,
                                     const double tau, const double beta,
                                     const double& stab_low, const double& stab_high)
    {
        destination -= tau * (stab_low - beta * stab_high);
    }

    //*************************************************************************************
    //*************************************************************************************
    //viscous term: nu_i * trace(L_ij) * (U_j - U_i).
    //NOTE: only nu_i is used — the averaged-viscosity variant is commented out,
    //so nu_j is currently an unused parameter.
    inline void Add_ViscousContribution(array_1d<double, TDim>& destination,
                                        const array_1d<double, TDim>& U_i, const double& nu_i,
                                        const array_1d<double, TDim>& U_j, const double& nu_j)
    {
        //calculate scalar laplacian
        double L = 0.0;
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            L += LaplacianIJ(l_comp, l_comp);

        //double nu_avg = 0.5*(nu_i+nu_j);

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += nu_i * L * (U_j[l_comp] - U_i[l_comp]);
    }

    inline void Sub_ViscousContribution(array_1d<double, TDim>& destination,
                                        const array_1d<double, TDim>& U_i, const double& nu_i,
                                        const array_1d<double, TDim>& U_j, const double& nu_j)
    {
        //calculate scalar laplacian
        double L = 0.0;
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            L += LaplacianIJ(l_comp, l_comp);

        //double nu_avg = 0.5*(nu_i+nu_j);

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= nu_i * L * (U_j[l_comp] - U_i[l_comp]);
    }
};
//class definition of matrices using CSR format
template<unsigned int TDim, class TSparseSpace>
class MatrixContainer
{
public:
//name for the self defined structure
typedef EdgesStructureType<TDim> CSR_Tuple;
typedef vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef vector<unsigned int> IndicesVectorType;
//names for separately stored node based values
typedef vector<double> ValuesVectorType;
// typedef std::vector< array_1d<double,TDim> > CalcVectorType;
typedef vector< array_1d<double, TDim> > CalcVectorType;
//constructor and destructor
    //default constructor: all containers start empty; sizes are allocated
    //later by ConstructCSRVector()
    MatrixContainer()
    {
    };
    //destructor: members are ublas vectors and release themselves
    ~MatrixContainer()
    {
    };
//functions to return private values
    //number of directed edges (ij and ji counted separately, see ConstructCSRVector)
    inline unsigned int GetNumberEdges()
    {
        return mNumberEdges;
    }
    //per-edge CSR tuples (mass, laplacian, gradient components)
    inline EdgesVectorType& GetEdgeValues()
    {
        return mNonzeroEdgeValues;
    }
    //CSR column indices (neighbour node j of each edge)
    inline IndicesVectorType& GetColumnIndex()
    {
        return mColumnIndex;
    }
    //CSR row pointers (size n_nodes+1; last entry equals mNumberEdges)
    inline IndicesVectorType& GetRowStartIndex()
    {
        return mRowStartIndex;
    }
    //diagonal (lumped) mass matrix entries, one per node
    inline ValuesVectorType& GetLumpedMass()
    {
        return mLumpedMassMatrix;
    }
    //inverted lumped mass matrix entries, one per node
    inline ValuesVectorType& GetInvertedMass()
    {
        return mInvertedMassMatrix;
    }
    //diagonal entries Gii of the gradient matrix, one vector per node
    inline CalcVectorType& GetDiagGradient()
    {
        return mDiagGradientMatrix;
    }
    //minimal nodal height (initialized to 1e10 in ConstructCSRVector)
    inline ValuesVectorType& GetHmin()
    {
        return mHmin;
    }
//********************************************************
//function to size and initialize the vector of CSR tuples
    //Sizes and initializes the CSR storage from the mesh connectivity:
    //counts edges per node, assigns each node a global index via AUX_INDEX,
    //allocates the CSR arrays, and fills column indices with the (sorted)
    //neighbour indices while zeroing all edge values.
    void ConstructCSRVector(ModelPart& model_part)
    {
        KRATOS_TRY

        //SIZE OF CSR VECTOR
        //defining the number of nodes and edges
        int n_nodes = model_part.Nodes().size();
        //remark: no colouring algorithm is used here (symmetry is neglected)
        //        respectively edge ij is considered different from edge ji
        mNumberEdges = 0;
        //counter to assign and get global nodal index
        int i_node = 0;

        //counting the edges connecting the nodes
        for (typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin(); node_it != model_part.NodesEnd(); node_it++)
        {
            //counting neighbours of each node
            mNumberEdges += (node_it->GetValue(NEIGHBOUR_NODES)).size();

            //DIAGONAL TERMS
            //mNumberEdges++;

            //assigning global index to each node (stored as a double in the nodal database)
            node_it->FastGetSolutionStepValue(AUX_INDEX) = static_cast<double> (i_node++);
        }
        //error message in case number of nodes does not coincide with number of indices
        if (i_node != n_nodes)
            KRATOS_WATCH("ERROR - Highest nodal index doesn't coincide with number of nodes!");

        //allocating memory for block of CSR data - setting to zero for first-touch OpenMP allocation
        mNonzeroEdgeValues.resize(mNumberEdges); //SetToZero(mNonzeroEdgeValues);
        mColumnIndex.resize(mNumberEdges); //SetToZero(mColumnIndex);
        mRowStartIndex.resize(n_nodes + 1); //SetToZero(mRowStartIndex);
        mLumpedMassMatrix.resize(n_nodes);
        SetToZero(mLumpedMassMatrix);
        mInvertedMassMatrix.resize(n_nodes);
        SetToZero(mInvertedMassMatrix);
        mDiagGradientMatrix.resize(n_nodes);
        SetToZero(mDiagGradientMatrix);
        mHmin.resize(n_nodes);
        SetToZero(mHmin);

        //INITIALIZING OF THE CSR VECTOR
        //temporary variable as the row start index of a node depends on the number of neighbours of the previous one
        unsigned int row_start_temp = 0;

        //NOTE(review): the loop below runs the thread partitions one after
        //another (each parallel region only lets thread k work) — effectively
        //sequential, presumably so each thread first-touches its own slice of
        //the CSR arrays for NUMA locality. row_start_temp carries a sequential
        //dependency across partitions, so do not parallelize this naively.
        int number_of_threads = OpenMPUtils::GetNumThreads();
        std::vector<int> row_partition(number_of_threads);
        OpenMPUtils::DivideInPartitions(model_part.Nodes().size(), number_of_threads, row_partition);

        for (int k = 0; k < number_of_threads; k++)
        {
            #pragma omp parallel
            if (OpenMPUtils::ThisThread() == k)
            {
                for (unsigned int aux_i = static_cast<unsigned int> (row_partition[k]); aux_i < static_cast<unsigned int> (row_partition[k + 1]); aux_i++)
                {
                    typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin() + aux_i;

                    //main loop over all nodes
                    //        for (typename ModelPart::NodesContainerType::iterator node_it=model_part.NodesBegin(); node_it!=model_part.NodesEnd(); node_it++)
                    //        {
                    //getting the global index of the node
                    i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));

                    //determining its neighbours
                    GlobalPointersVector< Node < 3 > >& neighb_nodes = node_it->GetValue(NEIGHBOUR_NODES);

                    //number of neighbours of node i determines row start index for the following node
                    unsigned int n_neighbours = neighb_nodes.size();

                    //DIAGONAL TERMS
                    //n_neighbours++;

                    //reserving memory for work array
                    std::vector<unsigned int> work_array;
                    work_array.reserve(n_neighbours);

                    //DIAGONAL TERMS
                    //work_array.push_back(i_node);

                    //nested loop over the neighbouring nodes
                    for (GlobalPointersVector< Node < 3 > >::iterator neighb_it = neighb_nodes.begin(); neighb_it != neighb_nodes.end(); neighb_it++)
                    {
                        //getting global index of the neighbouring node
                        work_array.push_back(static_cast<unsigned int> (neighb_it->FastGetSolutionStepValue(AUX_INDEX)));
                    }
                    //reordering neighbours following their global indices
                    std::sort(work_array.begin(), work_array.end());

                    //setting current row start index
                    mRowStartIndex[i_node] = row_start_temp;
                    //nested loop over the by now ordered neighbours
                    for (unsigned int counter = 0; counter < n_neighbours; counter++)
                    {
                        //getting global index of the neighbouring node
                        unsigned int j_neighbour = work_array[counter];
                        //calculating CSR index
                        unsigned int csr_index = mRowStartIndex[i_node] + counter;

                        //saving column index j of the original matrix
                        mColumnIndex[csr_index] = j_neighbour;

                        //initializing the CSR vector entries with zero
                        mNonzeroEdgeValues[csr_index].Mass = 0.0;
                        //mNonzeroEdgeValues[csr_index].Laplacian = 0.0;
                        noalias(mNonzeroEdgeValues[csr_index].LaplacianIJ) = ZeroMatrix(TDim, TDim);
                        noalias(mNonzeroEdgeValues[csr_index].Ni_DNj) = ZeroVector(TDim);
                        //TRANSPOSED GRADIENT
                        noalias(mNonzeroEdgeValues[csr_index].DNi_Nj) = ZeroVector(TDim);
                    }

                    //preparing row start index for next node
                    row_start_temp += n_neighbours;
                }
            }
        }
        //adding last entry (necessary for abort criterion of loops)
        mRowStartIndex[n_nodes] = mNumberEdges;

        //INITIALIZING NODE BASED VALUES
        //lumped mass matrix (elements Mi)
        /*            #pragma omp parallel for
                    for (int i_node=0; i_node<n_nodes; i_node++)
                            mLumpedMassMatrix[i_node] = 0.0;*/

        #pragma omp parallel for
        //set the heights to a huge number
        for (int i_node = 0; i_node < n_nodes; i_node++)
            mHmin[i_node] = 1e10;

        //diagonal of gradient matrix (elements Gii)
        //    #pragma omp parallel for
        //    for (int i_node=0; i_node<n_nodes; i_node++)
        //        noalias(mDiagGradientMatrix[i_node]) = ZeroVector(TDim);

        KRATOS_CATCH("")
    }
//*********************************
//function to precalculate CSR data
// Assembles the edge-based (CSR-stored) operators from an element loop:
// per-edge consistent mass, Laplacian blocks, gradient/transposed-gradient
// vectors, plus the node-based lumped mass, its inverse, the diagonal of the
// gradient matrix and the minimal element height around each node.
// Precondition: the CSR pattern (mRowStartIndex/mColumnIndex) and AUX_INDEX
// numbering must already be set up; entries are accumulated with +=, so the
// CSR values must start zero-initialized.
void BuildCSRData(ModelPart& model_part)
{
KRATOS_TRY
//PRECALCULATING CSR DATA
//defining temporary local variables for elementwise addition
//shape functions
array_1d<double, TDim + 1 > N;
//shape function derivatives
boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim> dN_dx;
//volume
double volume;
//weighting factor
double weighting_factor = 1.0 / static_cast<double> (TDim + 1);
//elemental matrices
boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim + 1 > mass_consistent;
//boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> laplacian;
array_1d<double, TDim + 1 > mass_lumped;
//global indices of elemental nodes
array_1d<unsigned int, TDim + 1 > nodal_indices;
array_1d<double, TDim + 1 > heights;
//loop over all elements
for (typename ModelPart::ElementsContainerType::iterator elem_it = model_part.ElementsBegin(); elem_it != model_part.ElementsEnd(); elem_it++)
{
//LOCAL ELEMENTWISE CALCULATIONS
//getting geometry data of the element
GeometryUtils::CalculateGeometryData(elem_it->GetGeometry(), dN_dx, N, volume);
//calculate lenght of the heights of the element
//(the height of node i is the inverse of the Euclidean norm of the
// gradient of its shape function: h_i = 1 / |grad N_i|)
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
{
heights[ie_node] = dN_dx(ie_node, 0) * dN_dx(ie_node, 0);
for (unsigned int comp = 1; comp < TDim; comp++)
{
heights[ie_node] += dN_dx(ie_node, comp) * dN_dx(ie_node, comp);
}
heights[ie_node] = 1.0 / sqrt(heights[ie_node]);
// KRATOS_WATCH(heights);
}
//setting up elemental mass matrices
CalculateMassMatrix(mass_consistent, volume);
//lump the consistent mass by row-summing
noalias(mass_lumped) = ZeroVector(TDim + 1);
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
{
for (unsigned int je_node = 0; je_node <= TDim; je_node++)
{
//mass_consistent(ie_node,je_node) = N(ie_node) * N(je_node) * volume;
mass_lumped[ie_node] += mass_consistent(ie_node, je_node);
}
//mass_lumped[ie_node] = volume * N[ie_node];
}
/*OLD DATA STRUCTURE
//calculating elemental laplacian matrix
noalias(laplacian) = ZeroMatrix(TDim+1,TDim+1);
for (unsigned int ie_node=0; ie_node<=TDim; ie_node++)
for (unsigned int je_node=ie_node+1; je_node<=TDim; je_node++)
//componentwise multiplication
for (unsigned int component=0; component<TDim; component++)
{
//taking advantage of symmetry
double temp = dN_dx(ie_node,component) * dN_dx(je_node,component) * volume;
laplacian(ie_node,je_node) += temp;
laplacian(je_node,ie_node) += temp;
}
//multiply gradient with volume referring to each gauss point
dN_dx *= (volume / double(TDim+1));*/
//(corresponding to Ni * dOmega respectively Nj * dOmega)
double weighted_volume = volume * weighting_factor;
//ASSEMBLING GLOBAL DATA STRUCTURE
//loop over the nodes of the element to determine their global indices
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
nodal_indices[ie_node] = static_cast<unsigned int> (elem_it->GetGeometry()[ie_node].FastGetSolutionStepValue(AUX_INDEX));
//assembling global "edge matrices" by adding local contributions
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
{
//check the heights and change the value if minimal is found
if (mHmin[ nodal_indices[ie_node] ] > heights[ie_node])
mHmin[ nodal_indices[ie_node] ] = heights[ie_node];
for (unsigned int je_node = 0; je_node <= TDim; je_node++)
{
//remark: there is no edge linking node i with itself!
//DIAGONAL TERMS
if (ie_node != je_node)
{
//calculating CSR index from global index
//(linear search inside the row, see GetCSRIndex)
unsigned int csr_index = GetCSRIndex(nodal_indices[ie_node], nodal_indices[je_node]);
//assigning precalculated element data to the referring edges
//contribution to edge mass
mNonzeroEdgeValues[csr_index].Mass += mass_consistent(ie_node, je_node);
//contribution to edge laplacian
/*OLD DATA STRUCTURE
mNonzeroEdgeValues[csr_index].Laplacian = laplacian(ie_node,je_node);*/
boost::numeric::ublas::bounded_matrix <double, TDim, TDim>& laplacian = mNonzeroEdgeValues[csr_index].LaplacianIJ;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
laplacian(l_comp, k_comp) += dN_dx(ie_node, l_comp) * dN_dx(je_node, k_comp) * volume;
//contribution to edge gradient
array_1d<double, TDim>& gradient = mNonzeroEdgeValues[csr_index].Ni_DNj;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
//gradient[l_comp] += dN_dx(je_node,l_comp);
gradient[l_comp] += dN_dx(je_node, l_comp) * weighted_volume;
//TRANSPOSED GRADIENT
//contribution to transposed edge gradient
array_1d<double, TDim>& transp_gradient = mNonzeroEdgeValues[csr_index].DNi_Nj;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
//transp_gradient[l_comp] += dN_dx(ie_node,l_comp);
transp_gradient[l_comp] += dN_dx(ie_node, l_comp) * weighted_volume;
}
}
}
//assembling node based vectors
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
//diagonal of the global lumped mass matrix
mLumpedMassMatrix[nodal_indices[ie_node]] += mass_lumped[ie_node];
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
{
//diagonal of the global gradient matrix
array_1d<double, TDim>& gradient = mDiagGradientMatrix[nodal_indices[ie_node]];
for (unsigned int component = 0; component < TDim; component++)
//gradient[component] += dN_dx(ie_node,component);
gradient[component] += dN_dx(ie_node, component) * weighted_volume;
}
}
//copy mass matrix to inverted mass matrix
for (unsigned int inode = 0; inode < mLumpedMassMatrix.size(); inode++)
{
mInvertedMassMatrix[inode] = mLumpedMassMatrix[inode];
}
//perform MPI syncronization between the domains
//calculating inverted mass matrix (this requires syncronization for MPI paraellelism
//NOTE(review): no synchronization call is visible here — for distributed runs
//the assembled mass must be summed across domains before inverting; confirm
//the caller handles this.
for (unsigned int inode = 0; inode < mInvertedMassMatrix.size(); inode++)
{
mInvertedMassMatrix[inode] = 1.0 / mInvertedMassMatrix[inode];
}
KRATOS_CATCH("")
}
//******************************************
//function to calculate CSR index of edge ij
unsigned int GetCSRIndex(unsigned int NodeI, unsigned int NeighbourJ)
{
KRATOS_TRY
//index indicating data position of edge ij
unsigned int csr_index;
//searching for coincidence of stored column index and neighbour index j
for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
if (mColumnIndex[csr_index] == NeighbourJ)
break;
//returning CSR index of edge ij
return csr_index;
KRATOS_CATCH("")
}
//***********************************************
//function to get pointer to CSR tuple of edge ij
// Returns a pointer to the CSR value tuple of edge (NodeI, NeighbourJ),
// locating it by a linear scan of row NodeI's column indices.
CSR_Tuple* GetTuplePointer(unsigned int NodeI, unsigned int NeighbourJ)
{
    KRATOS_TRY
    const unsigned int row_end = mRowStartIndex[NodeI + 1];
    unsigned int csr_index = mRowStartIndex[NodeI];
    // stop at the matching column (or at the row end if absent)
    while (csr_index != row_end && mColumnIndex[csr_index] != NeighbourJ)
        ++csr_index;
    return &mNonzeroEdgeValues[csr_index];
    KRATOS_CATCH("")
}
//*******************************
//function to free dynamic memory
// Releases all dynamically sized storage: the CSR pattern and edge values
// as well as every node-based array.
void Clear()
{
    KRATOS_TRY
    // CSR structure and per-edge data
    mNonzeroEdgeValues.clear();
    mColumnIndex.clear();
    mRowStartIndex.clear();
    // node-based arrays
    mHmin.clear();
    mLumpedMassMatrix.clear();
    mInvertedMassMatrix.clear();
    mDiagGradientMatrix.clear();
    KRATOS_CATCH("")
}
//****************************
//functions to access database
//(note that this is already thought for parallel;
// for a single processor this could be done in a faster way)
// Copies the first TDim coordinates of every node into rDestination,
// indexed by the node's position in the container.
void FillCoordinatesFromDatabase(CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY
    int num_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator nodes_begin = rNodes.begin();
    #pragma omp parallel for firstprivate(num_nodes, nodes_begin)
    for (int i = 0; i < num_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = nodes_begin + i;
        // destination slot: positional index (AUX_INDEX lookup not needed here)
        unsigned int i_node = i;
        array_1d<double, TDim>& dest = rDestination[i_node];
        for (unsigned int component = 0; component < TDim; component++)
            dest[component] = (*node_it)[component];
    }
    KRATOS_CATCH("");
}
//****************************
//functions to access database
//(note that this is already thought for parallel;
// for a single processor this could be done in a faster way)
// Copies the current-step nodal values of a 3-component variable into the
// flat calculation array rDestination (first TDim components only).
void FillVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY
    int num_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator nodes_begin = rNodes.begin();
    // cache the variable's position inside the nodal database once
    unsigned int var_pos = nodes_begin->pGetVariablesList()->Index(rVariable);
    #pragma omp parallel for firstprivate(num_nodes, nodes_begin, var_pos)
    for (int i = 0; i < num_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = nodes_begin + i;
        unsigned int i_node = i;
        // source value of the current solution step
        const array_1d<double, 3 >& source = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
        array_1d<double, TDim>& dest = rDestination[i_node];
        for (unsigned int component = 0; component < TDim; component++)
            dest[component] = source[component];
    }
    KRATOS_CATCH("");
}
// Copies the PREVIOUS-step (buffer position 1) nodal values of a 3-component
// variable into the flat calculation array rDestination.
void FillOldVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY
    int num_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator nodes_begin = rNodes.begin();
    // cache the variable's position inside the nodal database once
    unsigned int var_pos = nodes_begin->pGetVariablesList()->Index(rVariable);
    #pragma omp parallel for firstprivate(num_nodes, nodes_begin, var_pos)
    for (int i = 0; i < num_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = nodes_begin + i;
        unsigned int i_node = i;
        // source value one step back in the solution-step buffer
        const array_1d<double, 3 >& source = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
        array_1d<double, TDim>& dest = rDestination[i_node];
        for (unsigned int component = 0; component < TDim; component++)
            dest[component] = source[component];
    }
    KRATOS_CATCH("");
}
// Copies the current-step nodal values of a scalar variable into rDestination.
void FillScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY
    int num_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator nodes_begin = rNodes.begin();
    // cache the variable's position inside the nodal database once
    unsigned int var_pos = nodes_begin->pGetVariablesList()->Index(rVariable);
    #pragma omp parallel for firstprivate(num_nodes, nodes_begin, var_pos)
    for (int i = 0; i < num_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = nodes_begin + i;
        unsigned int i_node = i;
        // fetch and store the current-step scalar
        rDestination[i_node] = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
    }
    KRATOS_CATCH("");
}
// Copies the PREVIOUS-step (buffer position 1) nodal values of a scalar
// variable into rDestination.
void FillOldScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY
    int num_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator nodes_begin = rNodes.begin();
    // cache the variable's position inside the nodal database once
    unsigned int var_pos = nodes_begin->pGetVariablesList()->Index(rVariable);
    #pragma omp parallel for firstprivate(num_nodes, nodes_begin, var_pos)
    for (int i = 0; i < num_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = nodes_begin + i;
        unsigned int i_node = i;
        // fetch and store the scalar from one step back in the buffer
        rDestination[i_node] = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
    }
    KRATOS_CATCH("");
}
// Writes the first TDim components of rOrigin back into the current-step
// nodal database for the given 3-component variable.
void WriteVectorToDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY
    int num_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator nodes_begin = rNodes.begin();
    // cache the variable's position inside the nodal database once
    unsigned int var_pos = nodes_begin->pGetVariablesList()->Index(rVariable);
    #pragma omp parallel for firstprivate(num_nodes, nodes_begin, var_pos)
    for (int i = 0; i < num_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = nodes_begin + i;
        unsigned int i_node = i;
        // destination slot inside the nodal database
        array_1d<double, 3 >& dest = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
        const array_1d<double, TDim>& source = rOrigin[i_node];
        for (unsigned int component = 0; component < TDim; component++)
            dest[component] = source[component];
    }
    KRATOS_CATCH("");
}
// Writes the entries of rOrigin back into the current-step nodal database
// for the given scalar variable.
void WriteScalarToDatabase(Variable<double>& rVariable, ValuesVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY
    int num_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator nodes_begin = rNodes.begin();
    // cache the variable's position inside the nodal database once
    unsigned int var_pos = nodes_begin->pGetVariablesList()->Index(rVariable);
    #pragma omp parallel for firstprivate(num_nodes, nodes_begin, var_pos)
    for (int i = 0; i < num_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = nodes_begin + i;
        int i_node = i;
        // write the scalar into the nodal database
        node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos) = rOrigin[i_node];
    }
    KRATOS_CATCH("");
}
//*********************************************************************
//destination = origin1 + value * Minv*origin
// Computes, node by node and component by component:
//   destination = origin1 + value * Minv * origin
void Add_Minv_value(
    CalcVectorType& destination,
    const CalcVectorType& origin1,
    const double value,
    const ValuesVectorType& Minv_vec,
    const CalcVectorType& origin
)
{
    KRATOS_TRY
    const int n = destination.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n; i_node++)
    {
        // scalar factor value * M^{-1}_i is shared by all components
        const double factor = value * Minv_vec[i_node];
        array_1d<double, TDim>& dest = destination[i_node];
        const array_1d<double, TDim>& base = origin1[i_node];
        const array_1d<double, TDim>& increment = origin[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            dest[comp] = base[comp] + factor * increment[comp];
    }
    KRATOS_CATCH("")
}
// Scalar overload of the update destination = origin1 + value * Minv * origin.
void Add_Minv_value(
    ValuesVectorType& destination,
    const ValuesVectorType& origin1,
    const double value,
    const ValuesVectorType& Minv_vec,
    const ValuesVectorType& origin
)
{
    KRATOS_TRY
    const int n = destination.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n; i_node++)
    {
        // value * M^{-1}_i applied to the scalar increment
        const double factor = value * Minv_vec[i_node];
        destination[i_node] = origin1[i_node] + factor * origin[i_node];
    }
    KRATOS_CATCH("")
}
//**********************************************************************
// Resizes the vector-valued container and zeroes every component in parallel.
void AllocateAndSetToZero(CalcVectorType& data_vector, int size)
{
    data_vector.resize(size);
    #pragma omp parallel for
    for (int i_node = 0; i_node < size; i_node++)
    {
        array_1d<double, TDim>& entry = data_vector[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            entry[comp] = 0.0;
    }
}
// Resizes the scalar container and zeroes every entry in parallel.
void AllocateAndSetToZero(ValuesVectorType& data_vector, int size)
{
    data_vector.resize(size);
    #pragma omp parallel for
    for (int i_node = 0; i_node < size; i_node++)
        data_vector[i_node] = 0.0;
}
//**********************************************************************
// Zeroes every component of every entry of a vector-valued container.
void SetToZero(CalcVectorType& data_vector)
{
    const int n = data_vector.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n; i_node++)
    {
        array_1d<double, TDim>& entry = data_vector[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            entry[comp] = 0.0;
    }
}
// Zeroes every entry of a scalar container.
void SetToZero(ValuesVectorType& data_vector)
{
    const int n = data_vector.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n; i_node++)
        data_vector[i_node] = 0.0;
}
//**********************************************************************
void AssignVectorToVector(const CalcVectorType& origin,
CalcVectorType& destination
)
{
int loop_size = origin.size();
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
const array_1d<double, TDim>& orig = origin[i_node];
array_1d<double, TDim>& dest = destination[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
dest[comp] = orig[comp];
}
}
// Entry-wise copy of a scalar container into another.
// The destination must already have at least origin.size() entries.
void AssignVectorToVector(const ValuesVectorType& origin,
                          ValuesVectorType& destination
                          )
{
    const int n = origin.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < n; i_node++)
        destination[i_node] = origin[i_node];
}
private:
//number of edges
unsigned int mNumberEdges;
//CSR data vector for storage of the G, L and consistent M components of edge ij
EdgesVectorType mNonzeroEdgeValues;
//vector to store column indices of nonzero matrix elements for each row
IndicesVectorType mColumnIndex;
//index vector to access the start of matrix row i in the column vector
IndicesVectorType mRowStartIndex;
//inverse of the mass matrix ... for parallel calculation each subdomain should contain this correctly calculated (including contributions of the neighbours)
ValuesVectorType mInvertedMassMatrix;
//minimum height around one node
ValuesVectorType mHmin;
//lumped mass matrix (separately stored due to lack of diagonal elements of the consistent mass matrix)
ValuesVectorType mLumpedMassMatrix;
//diagonal of the gradient matrix (separately stored due to special calculations)
CalcVectorType mDiagGradientMatrix;
//*******************************************
//functions to set up elemental mass matrices
// Consistent mass matrix of a linear triangle (2D case, TDim == 2):
// volume/6 on the diagonal and volume/12 off-diagonal.
void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 3, 3 > & mass_consistent, double volume)
{
    const double diag_entry = 0.16666666666666666667 * volume;   // 1/6
    const double offdiag_entry = 0.08333333333333333333 * volume; // 1/12
    for (unsigned int i_node = 0; i_node <= TDim; i_node++)
    {
        mass_consistent(i_node, i_node) = diag_entry;
        // fill the upper and lower triangle together (symmetry)
        for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
        {
            mass_consistent(i_node, j_neighbour) = offdiag_entry;
            mass_consistent(j_neighbour, i_node) = offdiag_entry;
        }
    }
}
// Consistent mass matrix of a linear tetrahedron (3D case, TDim == 3):
// volume/10 on the diagonal and volume/20 off-diagonal.
void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 4, 4 > & mass_consistent, double volume)
{
    const double diag_entry = 0.1 * volume;
    const double offdiag_entry = 0.05 * volume;
    for (unsigned int i_node = 0; i_node <= TDim; i_node++)
    {
        mass_consistent(i_node, i_node) = diag_entry;
        // fill the upper and lower triangle together (symmetry)
        for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
        {
            mass_consistent(i_node, j_neighbour) = offdiag_entry;
            mass_consistent(j_neighbour, i_node) = offdiag_entry;
        }
    }
}
};
} //namespace Kratos
#endif //KRATOS_EDGE_DATA_H_INCLUDED defined
|
dormlq.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zunmlq.c, normal z -> d, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_unmlq
*
* Overwrites the general complex m-by-n matrix C with
*
* side = PlasmaLeft side = PlasmaRight
* trans = PlasmaNoTrans Q * C C * Q
* trans = PlasmaTrans Q^T * C C * Q^T
*
* where Q is an orthogonal (or orthogonal) matrix defined as the product of k
* elementary reflectors
*
* Q = H(1) H(2) . . . H(k)
*
* as returned by plasma_dgelqf. Q is of order m if side = PlasmaLeft
* and of order n if side = PlasmaRight.
*
*******************************************************************************
*
* @param[in] side
* Intended usage:
* - PlasmaLeft: apply Q or Q^T from the left;
* - PlasmaRight: apply Q or Q^T from the right.
*
* @param[in] trans
* Intended usage:
* - PlasmaNoTrans: apply Q;
* - PlasmaTrans: apply Q^T.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] k
* The number of rows of elementary tile reflectors whose product
* defines the matrix Q.
* If side == PlasmaLeft, m >= k >= 0.
* If side == PlasmaRight, n >= k >= 0.
*
* @param[in] pA
* Details of the LQ factorization of the original matrix A as returned
* by plasma_dgelqf.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,k).
*
* @param[in] T
* Auxiliary factorization data, computed by plasma_dgelqf.
*
* @param[in,out] pC
* On entry, pointer to the m-by-n matrix C.
* On exit, C is overwritten by Q*C, Q^T*C, C*Q, or C*Q^T.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_dormlq
* @sa plasma_cunmlq
* @sa plasma_dormlq
* @sa plasma_sormlq
* @sa plasma_dgelqf
*
******************************************************************************/
int plasma_dormlq(plasma_enum_t side, plasma_enum_t trans,
                  int m, int n, int k,
                  double *pA, int lda,
                  plasma_desc_t T,
                  double *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        return -1;
    }
    if ((trans != PlasmaTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    // Q is of order m when applied from the left, of order n from the right.
    int an;
    if (side == PlasmaLeft) {
        an = m;
    }
    else {
        an = n;
    }
    if ((k < 0) || (k > an)) {
        plasma_error("illegal value of k");
        return -5;
    }
    if (lda < imax(1, k)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -10;
    }

    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaRealDouble, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        k, an, 0, 0, k, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmlq: work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // BUGFIX: release the tile descriptors created above; the original
        // code returned here without destroying A and C, leaking them.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc(pC, ldc, C, &sequence, &request);

        // Call the tile async function.
        plasma_omp_dormlq(side, trans,
                          A, T, C, work,
                          &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_unmlq
*
* Non-blocking tile version of plasma_dormlq().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Intended usage:
* - PlasmaLeft: apply Q or Q^T from the left;
* - PlasmaRight: apply Q or Q^T from the right.
*
* @param[in] trans
* Intended usage:
* - PlasmaNoTrans: apply Q;
* - PlasmaTrans: apply Q^T.
*
* @param[in] A
* Descriptor of matrix A stored in the tile layout.
* Details of the QR factorization of the original matrix A as returned
* by plasma_dgeqrf.
*
* @param[in] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by plasma_dgeqrf.
*
* @param[in,out] C
* Descriptor of matrix C.
* On entry, the m-by-n matrix C.
* On exit, C is overwritten by Q*C, Q^T*C, C*Q, or C*Q^T.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For multiplication by Q contains preallocated space for work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dormlq
* @sa plasma_omp_cunmlq
* @sa plasma_omp_dormlq
* @sa plasma_omp_sormlq
* @sa plasma_omp_dgelqf
*
******************************************************************************/
void plasma_omp_dormlq(plasma_enum_t side, plasma_enum_t trans,
                       plasma_desc_t A, plasma_desc_t T, plasma_desc_t C,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // BUGFIX: validate the error-reporting handles FIRST. The original code
    // checked them last and reported their absence via
    // plasma_request_fail(sequence, request, ...), which dereferences the
    // very pointer that was just found to be NULL.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        // report the failure on the sequence only; there is no request to mark
        sequence->status = PlasmaErrorIllegalValue;
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("invalid value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("invalid value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (C.m == 0 || C.n == 0 || A.m == 0 || A.n == 0)
        return;

    // Call the parallel function.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pdormlq_tree(side, trans,
                            A, T, C,
                            work, sequence, request);
    }
    else {
        plasma_pdormlq(side, trans,
                       A, T, C,
                       work, sequence, request);
    }
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/policy.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/registry.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declaractions.
*/
/*
  Per-channel compression schemes used by the PSD format.
*/
typedef enum
{
Raw = 0, /* uncompressed scanlines */
RLE = 1, /* PackBits run-length encoding */
ZipWithoutPrediction = 2, /* zlib deflate */
ZipWithPrediction = 3 /* zlib deflate with delta prediction */
} PSDCompressionType;
/*
  Color modes stored in the PSD file header (note the gaps: 5 and 6 are
  not defined here).
*/
typedef enum
{
BitmapMode = 0, /* 1-bit black and white */
GrayscaleMode = 1,
IndexedMode = 2, /* palette-based */
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declaractions.
*/
/*
  One channel record of a layer: its type id and the byte size of its
  stored (possibly compressed) data.
*/
typedef struct _ChannelInfo
{
short
type; /* channel id as read from the layer record */
size_t
size; /* length in bytes of the channel's data block */
} ChannelInfo;
/*
  Layer-mask information: the mask raster, its placement, default
  background value and flag bits.
*/
typedef struct _MaskInfo
{
Image
*image; /* decoded mask raster (may be NULL) */
RectangleInfo
page; /* mask position and extent */
unsigned char
background, /* fill value outside the mask rectangle */
flags;
} MaskInfo;
/*
  In-memory representation of one PSD layer record.
*/
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* type/size of each stored channel */
  char
    blendkey[4];  /* 4-character PSD blend-mode key, e.g. "norm" */
  Image
    *image;  /* decoded layer pixels */
  MaskInfo
    mask;  /* optional layer mask */
  Quantum
    opacity;  /* layer opacity, applied after decoding */
  RectangleInfo
    page;  /* layer bounds on the canvas */
  size_t
    offset_x,  /* NOTE(review): not referenced in this chunk — presumably
                  layer offsets used by the writer; confirm against callers */
    offset_y;
  unsigned char
    clipping,
    flags,
    name[256],  /* Pascal-style layer name, used as the "label" property */
    visible;    /* MagickFalse when the layer is hidden */
  unsigned short
    channels;  /* number of entries in channel_info */
  StringInfo
    *info;  /* additional (unparsed) layer information blob */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD stream begins with the four byte signature "8BPS"; anything
    shorter than the signature cannot be a PSD file.
  */
  if (length >= 4)
    {
      if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
        return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op)
{
  /*
    Map a MagickCore composite operator onto the four-character blend
    mode key Photoshop stores in a layer record.  Operators with no PSD
    equivalent fall back to "norm" (normal).
  */
  switch (op)
  {
    case ColorBurnCompositeOp: return("idiv");
    case ColorDodgeCompositeOp: return("div ");
    case ColorizeCompositeOp: return("colr");
    case DarkenCompositeOp: return("dark");
    case DifferenceCompositeOp: return("diff");
    case DissolveCompositeOp: return("diss");
    case ExclusionCompositeOp: return("smud");
    case HardLightCompositeOp: return("hLit");
    case HardMixCompositeOp: return("hMix");
    case HueCompositeOp: return("hue ");
    case LightenCompositeOp: return("lite");
    case LinearBurnCompositeOp: return("lbrn");
    case LinearDodgeCompositeOp: return("lddg");
    case LinearLightCompositeOp: return("lLit");
    case LuminizeCompositeOp: return("lum ");
    case MultiplyCompositeOp: return("mul ");
    case OverCompositeOp: return("norm");
    case OverlayCompositeOp: return("over");
    case PinLightCompositeOp: return("pLit");
    case SaturateCompositeOp: return("sat ");
    case ScreenCompositeOp: return("scrn");
    case SoftLightCompositeOp: return("sLit");
    case VividLightCompositeOp: return("vLit");
    default: return("norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image, ExceptionInfo* exception)
{
  const char
    *option;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Undo Photoshop's compositing of semi-transparent pixels against
    white by dividing out the (1-alpha)*white contribution from each
    color component.  Only sRGB images with an alpha channel qualify;
    the 'psd:alpha-unblend' option (default on) can disable it.
  */
  if (image->matte == MagickFalse || image->colorspace != sRGBColorspace)
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;
      gamma=QuantumScale*GetPixelAlpha(q);
      /* fully transparent or fully opaque pixels need no correction */
      if (gamma != 0.0 && gamma != 1.0)
        {
          SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma);
          SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma);
          SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma);
        }
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Translate the PSD on-disk compression tag into the generic
    MagickCore CompressionType.  Both ZIP variants map to plain
    ZipCompression; anything unrecognized is treated as uncompressed.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Scale every pixel's alpha by the layer opacity (revert=MagickFalse),
    or divide it back out (revert=MagickTrue, used on the write path).
    A fully opaque layer is a no-op.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == QuantumRange)
    return(MagickTrue);
  /* ensure an alpha channel exists before scaling it */
  if (image->matte != MagickTrue)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* another row already failed */
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity)));
      else if (opacity > 0)  /* avoid division by zero when reverting */
        SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/
          (MagickRealType) opacity)));
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;
  MagickBooleanType
    status;
  MagickPixelPacket
    color;
  ssize_t
    y;
  /*
    Multiply the layer's alpha by the mask intensity (or divide it back
    out when revert is set).  The mask may be smaller than the layer, so
    it is first composited onto a full-size canvas filled with the mask
    background color.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->matte=MagickTrue;
  GetMagickPixelPacket(complete_mask,&color);
  color.red=background;
  SetImageColor(complete_mask,&color);
  /* place the mask at its page offset relative to the layer */
  status=CompositeImage(complete_mask,OverCompositeOp,mask,
    mask->page.x-image->page.x,mask->page.y-image->page.y);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
  image->matte=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;
    register PixelPacket
      *p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* another row already failed */
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;
      alpha=GetPixelAlpha(q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha)));
      else if (intensity > 0)  /* avoid division by zero when reverting */
        SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange));
      q++;
      p++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;
  RandomInfo
    *random_info;
  StringInfo
    *key_info;
  /*
    Stash the layer's opacity mask in the image registry under a random
    key so the writer can restore it later; the key is recorded in the
    layer image's "psd:opacity-mask" artifact.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    FIX: indices 8 and 9 of the key are written below, so the buffer
    must hold at least 10 bytes; the previous GetRandomKey(random_info,
    2+1) allocated only 3 bytes and overflowed the heap buffer.
  */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=layer_info->mask.background;  /* append mask background color */
  key[9]='\0';
  /* translate the mask page into canvas coordinates before storing */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decode a PackBits (RLE) compressed stream into raw samples.  For
  depths below 8 each decoded byte is expanded into several pixel
  values.  Returns the number of samples written; callers compare this
  against the expected row size to detect truncated input.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/* stop (returning the samples decoded so far) when input is exhausted */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--
/* stop before overrunning the output buffer */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count
  int
    pixel;
  register ssize_t
    i,
    j;
  size_t
    length;
  ssize_t
    packets;
  packets=(ssize_t) number_compact_pixels;
  /* an opcode byte always needs at least one data byte, hence > 1 */
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;  /* 128 is a PackBits no-op marker */
    if (length > 128)
      {
        /*
          Replicate run: the next byte is repeated 257-length times.
        */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              /* 1-bit: expand into 8 samples, set bit -> 0, clear -> 255 */
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              /* 2-bit: four samples per byte */
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              /* 4-bit: two samples per byte */
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              /* 8-bit (and 16-bit handled bytewise): copy through */
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /*
      Literal run: copy the next length+1 bytes verbatim (expanding
      sub-byte depths as above).
    */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    layer;

  /*
    Release every per-layer resource (layer image, mask image and the
    ancillary info blob) before freeing the layer array itself.
  */
  for (layer=0; layer < number_layers; layer++)
  {
    LayerInfo
      *info;

    info=layer_info+layer;
    if (info->image != (Image *) NULL)
      info->image=DestroyImage(info->image);
    if (info->mask.image != (Image *) NULL)
      info->mask.image=DestroyImage(info->mask.image);
    if (info->info != (StringInfo *) NULL)
      info->info=DestroyStringInfo(info->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(Image *image)
{
  /*
    Bytes per sample: two for 16-bit data, or for palettes with more
    than 256 entries; otherwise one.
  */
  if (image->storage_class == PseudoClass)
    {
      if ((image->colors > 256) || (image->depth > 8))
        return(2);
      return(1);
    }
  return(image->depth > 8 ? 2 : 1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /*
    Length fields are 4 bytes wide in PSD (version 1) and 8 bytes wide
    in PSB (version 2) files.
  */
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  /*
    Bytes per scanline: 1-bit data is packed eight pixels to the byte;
    all other depths store one packet per pixel.
  */
  samples=image->columns;
  if (image->depth == 1)
    samples=(samples+7)/8;
  return(samples*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  const char
    *mode;

  /*
    Human-readable name of a PSD color mode, used for logging.
  */
  switch (type)
  {
    case BitmapMode: mode="Bitmap"; break;
    case GrayscaleMode: mode="Grayscale"; break;
    case IndexedMode: mode="Indexed"; break;
    case RGBMode: mode="RGB"; break;
    case CMYKMode: mode="CMYK"; break;
    case MultichannelMode: mode="Multichannel"; break;
    case DuotoneMode: mode="Duotone"; break;
    case LabMode: mode="L*A*B"; break;
    default: mode="unknown"; break;
  }
  return(mode);
}
static StringInfo *ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image)
{
  const unsigned char
    *p;
  StringInfo
    *profile;
  unsigned char
    name_length;
  unsigned int
    count;
  unsigned short
    id,
    short_sans;
  /*
    Walk the 8BIM image-resource blocks: copy the whole section into an
    "8bim" profile and extract the resources we understand (resolution
    info, and the marker that says whether merged image data exists).
  */
  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const void *) NULL,length);
  /* FIX: guard against allocation failure before dereferencing */
  if (profile == (StringInfo *) NULL)
    return((StringInfo *) NULL);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* length byte plus Pascal name are padded to an even byte count */
    if (name_length % 2 == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    if ((p+count) > (blocks+length))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MaxTextExtent];
        unsigned short
          resolution;
        /*
          Resolution info.
        */
        if (count < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->x_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->x_resolution);
        (void) SetImageProperty(image,"tiff:XResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->y_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->y_resolution);
        (void) SetImageProperty(image,"tiff:YResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /*
          Version info: byte 4 of the resource tells whether the file
          contains real merged image data.  FIX: reading *(p+4) needs
          at least 5 bytes; the previous test (count > 3) read one byte
          past the resource when count was exactly 4.
        */
        if ((count > 4) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* resource data is padded to an even length */
    if ((count & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;

  /*
    Reverse the bytes of a short key (e.g. "8BIM", "Lr16") read from a
    little-endian blob; MSB data is already in the expected order.
  */
  if (image->endian == MSBEndian)
    return;
  q=p+length-1;
  while (p < q)
  {
    char
      swap;

    swap=(*p);
    *p=(*q);
    *q=swap;
    p++;
    q--;
  }
}
/*
  Store one decoded sample into the pixel at q (and its colormap index
  at indexes+x for PseudoClass images).  'type' is the PSD channel id:
  0..4 are color/alpha components, -1 is the transparency channel and
  values below -1 are layer-mask channels.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,
  PixelPacket *q,IndexPacket *indexes,ssize_t x)
{
  if (image->storage_class == PseudoClass)
    {
      PixelPacket
        *color;
      if (type == 0)
        {
          /* channel 0 carries the palette index */
          if (packet_size == 1)
            SetPixelIndex(indexes+x,ScaleQuantumToChar(pixel));
          else
            SetPixelIndex(indexes+x,ScaleQuantumToShort(pixel));
        }
      color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
        GetPixelIndex(indexes+x));
      /* any extra channel supplies the alpha for the colormap entry */
      if ((type == 0) && (channels > 1))
        return;
      else
        SetPixelAlpha(color,pixel);
      SetPixelRGBO(q,color);
      return;
    }
  switch (type)
  {
    case -1:
    {
      /* transparency channel */
      SetPixelAlpha(q,pixel);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(q,pixel);
      /* grayscale images and user masks replicate into green/blue */
      if (channels < 3 || type == -2)
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(q,pixel);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(q,pixel);
      break;
    }
    case 3:
    {
      /* channel 3 is black for CMYK, otherwise alpha (if present) */
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,pixel);
      else
        if (image->matte != MagickFalse)
          SetPixelAlpha(q,pixel);
      break;
    }
    case 4:
    {
      /* channel 4 is alpha for CMYK images only */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->matte != MagickFalse)
        SetPixelAlpha(q,pixel);
      break;
    }
  }
}
/*
  Copy one decoded scanline of channel samples into row 'row' of the
  image, scaling 8/16-bit samples to Quantum and expanding 1-bit data
  eight pixels per byte.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,const size_t channels,
  const size_t row,const ssize_t type,const unsigned char *pixels,
  ExceptionInfo *exception)
{
  Quantum
    pixel;
  register const unsigned char
    *p;
  register IndexPacket
    *indexes;
  register PixelPacket
    *q;
  register ssize_t
    x;
  size_t
    packet_size;
  unsigned short
    nibble;
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return MagickFalse;
  indexes=GetAuthenticIndexQueue(image);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      {
        /* 16-bit samples are stored big-endian */
        p=PushShortPixel(MSBEndian,p,&nibble);
        pixel=ScaleShortToQuantum(nibble);
      }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q++,indexes,x);
      }
    else
      {
        ssize_t
          bit,
          number_bits;
        /*
          1-bit data: each byte holds up to 8 pixels (MSB first); a set
          bit is black (0), a clear bit is white (QuantumRange).
        */
        number_bits=image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit=0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
        }
        /* the inner loop advanced x one past the last pixel it wrote;
           compensate unless the row is complete */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed channel: one blob read per scanline, forwarded
  to ReadPSDChannelPixels.  Returns MagickFalse on a short read or a
  pixel-store failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    count,
    row_size;
  ssize_t
    y;
  unsigned char
    *pixels;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* assume failure until the row is fully read and stored */
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != row_size)
      break;
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    row;

  /*
    Read the table of per-scanline compressed byte counts that precedes
    RLE channel data: 2-byte entries for PSD files, 4-byte entries for
    PSB.  Returns NULL when the table cannot be allocated.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return sizes;
  for (row=0; row < (ssize_t) size; row++)
  {
    if (psd_info->version != 1)
      sizes[row]=(MagickOffsetType) ReadBlobLong(image);
    else
      sizes[row]=(MagickOffsetType) ReadBlobShort(image);
  }
  return sizes;
}
/*
  Read an RLE (PackBits) compressed channel.  'sizes' holds the
  compressed byte count of each scanline (from ReadPSDRLESizes); each
  row is read, decoded with DecodePSDPixels and stored via
  ReadPSDChannelPixels.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    length,
    row_size;
  ssize_t
    count,
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* size the compressed buffer for the largest scanline */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* a compressed row larger than row_size plus slack is corrupt */
  if (length > (row_size+512))
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* assume failure until the row is fully read, decoded and stored */
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* depth 1 passes a sentinel so rows stay byte-packed: the bit
       expansion happens later in ReadPSDChannelPixels */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read a ZIP compressed channel: inflate the blob, optionally undo the
  per-row delta prediction, then store the scanlines.  Only compiled
  when zlib support is available.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  register unsigned char
    *p;
  size_t
    count,
    length,
    packet_size,
    row_size;
  ssize_t
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  z_stream
    stream;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  /* inflate the whole channel in one pass into 'pixels' */
  ResetMagickMemory(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;
      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo delta prediction: each sample (after the first in a row) is
        stored as the difference from its left neighbor; 16-bit samples
        are big-endian byte pairs, hence the carry into the high byte.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read one channel of a layer, dispatching on the compression type.
  Mask channels (type < -1) are decoded into a separate mask image
  stored in layer_info->mask.image; unusable masks are skipped.  On
  return the blob is always positioned at the end of the channel data.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;
  MagickOffsetType
    offset;
  MagickBooleanType
    status;
  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;
      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* skip the data; size-2 because the 2-byte compression field
             of this channel has already been consumed */
          SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          mask->matte=MagickFalse;
          /* decode the mask channel into the mask image instead */
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* reposition past this channel regardless of how the decode went */
  SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  layer_info->mask.image=mask;
  return(status);
}
/*
  Decode one complete layer: set up the layer image's colorspace,
  compose operator and hidden psd:* artifacts, read every channel,
  then apply the layer opacity and (if present) the opacity mask.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];
  MagickBooleanType
    status;
  PSDCompressionType
    compression;
  ssize_t
    j;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      /* hidden layers are kept but flagged and never composited */
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    SetImageColorspace(layer_info->image,CMYKColorspace);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
           (psd_info->mode == GrayscaleMode))
    SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* each channel carries its own 2-byte compression tag */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
      compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  /* CMYK channel data is stored inverted */
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if (status != MagickFalse && layer_info->mask.image != (Image *) NULL)
    {
      const char
        *option;
      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  Verify that a layer supplies every color channel the color mode
  requires.  Builds a bitmask of required channels and clears a bit for
  each channel the layer actually declares; the layer is valid when
  nothing required is left over (an extra alpha channel is tolerated
  when the channel count allows it).
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    channel_type;
  register ssize_t
    i;
  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < layer_info->channels; i++)
  {
    short
      type;
    type=layer_info->channel_info[i].type;
    if (type == -1)
      {
        channel_type|=AlphaChannel;
        continue;
      }
    /* mask channels (< -1) count toward the color channel they shadow */
    if (type < -1)
      type=MagickAbsoluteValue(type+2);
    if (type == 0)
      channel_type&=~RedChannel;
    else if (type == 1)
      channel_type&=~GreenChannel;
    else if (type == 2)
      channel_type&=~BlueChannel;
    else if (type == 3)
      channel_type&=~BlackChannel;
  }
  if (channel_type == 0)
    return(MagickTrue);
  /* only the alpha bit remains: fine if there is a spare channel */
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count,
j,
number_layers;
/*
Parse the PSD "layer and mask information" section: per-layer geometry,
channel table, blend key, mask record and name, then the per-layer pixel
data. Successfully decoded layers are linked onto the image list.
Returns MagickTrue on success (or when there is nothing to read),
MagickFalse on failure.
*/
size=GetPSDSize(psd_info,image);
if (size == 0)
{
/*
Skip layers & masks.
*/
(void) ReadBlobLong(image);
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
status=MagickFalse;
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
return(MagickTrue);
else
{
/* A "Lr16" additional-info block holds 16-bit layers; re-read size. */
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
size=GetPSDSize(psd_info,image);
else
return(MagickTrue);
}
}
status=MagickTrue;
if (size != 0)
{
layer_info=(LayerInfo *) NULL;
number_layers=(short) ReadBlobShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->matte=MagickTrue;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
sizeof(*layer_info));
/*
Pass 1: read every layer record (geometry, channels, blend data).
*/
for (i=0; i < number_layers; i++)
{
ssize_t
x,
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
layer_info[i].page.y=ReadBlobSignedLong(image);
layer_info[i].page.x=ReadBlobSignedLong(image);
y=ReadBlobSignedLong(image);
x=ReadBlobSignedLong(image);
/* PSD stores top/left/bottom/right; convert to width/height. */
layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
if ((layer_info[i].channel_info[j].type < -4) ||
(layer_info[i].channel_info[j].type > 4))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
image->filename);
}
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
(void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
ReversePSDString(image,layer_info[i].blendkey,4);
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=ReadBlobSignedLong(image);
layer_info[i].mask.page.x=ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
/* Mask position is relative to the layer unless flag 0x01 set. */
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
(double) layer_info[i].mask.page.height,(double)
((MagickOffsetType) length)-18);
/*
Skip over the rest of the layer mask information. 18 bytes of the
mask record were consumed above; guard the unsigned subtraction so
a corrupt length < 18 cannot underflow into a huge skip.
*/
if ((length < 18) ||
(DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Layer name (Pascal string, padded to a multiple of 4 bytes).
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Whatever remains of the extra-data block is preserved verbatim as
the "psd:additional-info" profile. An underflow here (combined
length exceeding size) yields a huge value that the GetBlobSize
check below rejects.
*/
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
}
}
}
/*
Pass 2: allocate an image per non-empty layer.
*/
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) ||
(layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
/*
Pass 3: read the pixel data (skipped when only pinging).
*/
if (image_info->ping == MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
/* Empty layer: skip its channel data to stay in sync. */
for (j=0; j < layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
number_layers);
if (status == MagickFalse)
break;
}
}
if (status != MagickFalse)
{
/* Compact away empty layers, then link the rest into the list. */
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers > 0)
{
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
}
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
}
return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  /*
    Public entry point: enforce the coder security policy before delegating
    to ReadPSDLayersInternal().
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickOffsetType
    *sizes;

  PSDCompressionType
    compression;

  register ssize_t
    channel;

  /*
    Decode the flattened composite ("merged") image stored after the layer
    section. Only Raw and RLE compression are supported here.
  */
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if ((compression != Raw) && (compression != RLE))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE prefixes one packed-scanline length per row per channel. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (channel=0; channel < (ssize_t) psd_info->channels; channel++)
  {
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,channel,
        sizes+(channel*image->rows),exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,channel,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,channel,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* CMYK data is stored inverted in PSD files. */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateImage(image,MagickFalse);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  ReadPSDImage() reads an Adobe Photoshop (PSD/PSB) stream and returns the
  decoded image list, or NULL (with exception set) on failure. The stream is
  parsed strictly in file order: header, color-mode data, image resources,
  layer/mask section, then the precombined (merged) image.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
has_merged_image,
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
ssize_t
count;
StringInfo
*profile;
unsigned char
*data;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header. Version 1 is classic PSD, version 2 is PSB.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
/* Classic PSD caps dimensions at 30000 pixels. */
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image geometry and colorspace from the header mode; also
derive min_channels, which CheckPSDChannels later validates against.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
psd_info.min_channels=3;
if (psd_info.mode == LabMode)
SetImageColorspace(image,LabColorspace);
if (psd_info.mode == CMYKMode)
{
psd_info.min_channels=4;
SetImageColorspace(image,CMYKColorspace);
image->matte=psd_info.channels > 4 ? MagickTrue : MagickFalse;
}
else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
psd_info.min_channels=1;
SetImageColorspace(image,GRAYColorspace);
image->matte=psd_info.channels > 1 ? MagickTrue : MagickFalse;
}
else
image->matte=psd_info.channels > 3 ? MagickTrue : MagickFalse;
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if (psd_info.mode == DuotoneMode)
{
/*
Duotone image data; the format of this data is undocumented.
*/
data=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*data));
if (data == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) ReadBlob(image,(size_t) length,data);
data=(unsigned char *) RelinquishMagickMemory(data);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap: stored planar, all reds, then greens,
then blues.
*/
number_colors=length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->matte=MagickFalse;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block: 8BIM records (may clear has_merged_image and
yield an 8BIM profile).
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
&has_merged_image);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
/* NOTE(review): both longs are read, only the second kept -- presumably
the first is a sub-section length; confirm against the PSD spec. */
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
if (has_merged_image != MagickFalse || GetImageListLength(image) == 1)
has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
&psd_info,exception);
if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) &&
(length != 0))
{
/* No usable composite and no layers decoded: retry the layer section. */
SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
}
if (has_merged_image == MagickFalse)
{
Image
*merged;
/*
Synthesize the composite by flattening the decoded layers.
*/
if (GetImageListLength(image) == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
SetImageAlphaChannel(image,TransparentAlphaChannel);
image->background_color.opacity=TransparentOpacity;
merged=MergeImageLayers(image,FlattenLayer,exception);
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
(void) SetImageProfile(image,GetStringInfoName(profile),profile);
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /*
    Register the large-document (PSB) flavor first, then classic PSD. Both
    share the same coder callbacks and require a seekable stream.
  */
  entry=SetMagickInfo("PSB");
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->seekable_stream=MagickTrue;
  entry->module=ConstantString("PSD");
  entry->description=ConstantString("Adobe Large Document Format");
  (void) RegisterMagickInfo(entry);
  entry=SetMagickInfo("PSD");
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->seekable_stream=MagickTrue;
  entry->module=ConstantString("PSD");
  entry->description=ConstantString("Adobe Photoshop bitmap");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
/*
  UnregisterPSDImage() removes the PSB and PSD format registrations added by
  RegisterPSDImage().
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Write a scanline-offset field: 16-bit in classic PSD (version 1),
    32-bit in PSB. Returns the number of bytes written.
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    restore_offset;

  ssize_t
    count;

  /*
    Seek back to `offset`, overwrite the placeholder reserved earlier by
    SetPSDOffset() with the real value, then restore the stream position.
  */
  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  count=psd_info->version == 1 ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Write a section-size field: 32-bit in classic PSD (version 1), 64-bit
    in PSB. Returns the number of bytes written.
  */
  if (psd_info->version != 1)
    return(WriteBlobMSBLongLong(image,size));
  return(WriteBlobMSBLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    restore_offset;

  ssize_t
    count;

  /*
    Patch a size placeholder previously written by SetPSDSize() at
    `offset`, then return to the current end of the blob.
  */
  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  count=psd_info->version == 1 ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  PSDPackbitsEncodeImage() compresses `length` bytes from `pixels` into
  `compact_pixels` using PackBits run-length encoding and returns the number
  of compacted bytes written (including the trailing EOD marker).
  Runs and literal sequences are capped at 127 bytes, matching the PackBits
  one-byte count encoding.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
const unsigned char *pixels,unsigned char *compact_pixels)
{
int
count;
register ssize_t
i,
j;
register unsigned char
*q;
unsigned char
*packbits;
/*
Compress pixels with Packbits encoding.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
assert(compact_pixels != (unsigned char *) NULL);
/* Scratch buffer for one literal run: 1 count byte + up to 127 literals. */
packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
if (packbits == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
q=compact_pixels;
/* i counts the remaining input bytes; the tail cases (1-3 bytes) are
handled explicitly so the 3-byte lookahead below stays in bounds. */
for (i=(ssize_t) length; i != 0; )
{
switch (i)
{
case 1:
{
i--;
*q++=(unsigned char) 0;
*q++=(*pixels);
break;
}
case 2:
{
i-=2;
*q++=(unsigned char) 1;
*q++=(*pixels);
*q++=pixels[1];
break;
}
case 3:
{
i-=3;
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/* Three equal bytes: emit as a run of 3 (count byte 256-3+1). */
*q++=(unsigned char) ((256-3)+1);
*q++=(*pixels);
break;
}
*q++=(unsigned char) 2;
*q++=(*pixels);
*q++=pixels[1];
*q++=pixels[2];
break;
}
default:
{
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/*
Packed run.
*/
count=3;
while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
{
count++;
if (count >= 127)
break;
}
i-=count;
*q++=(unsigned char) ((256-count)+1);
*q++=(*pixels);
pixels+=count;
break;
}
/*
Literal run: copy bytes until three equal bytes are seen ahead (a
new packed run), the remaining-input limit is reached, or 127
literals have been buffered.
*/
count=0;
while ((*(pixels+count) != *(pixels+count+1)) ||
(*(pixels+count+1) != *(pixels+count+2)))
{
packbits[count+1]=pixels[count];
count++;
if (((ssize_t) count >= (i-3)) || (count >= 127))
break;
}
i-=count;
*packbits=(unsigned char) (count-1);
for (j=0; j <= (ssize_t) count; j++)
*q++=packbits[j];
pixels+=count;
break;
}
}
}
*q++=(unsigned char) 128; /* EOD marker */
packbits=(unsigned char *) RelinquishMagickMemory(packbits);
return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  /*
    Emit the 2-byte compression marker for `next_image`. For RLE also
    reserve one scanline-length slot per row per channel; these
    placeholders are patched later via WritePSDOffset(). Returns the total
    number of bytes written.
  */
  if (next_image->compression == RLECompression)
    {
      count=WriteBlobMSBShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    return(WriteBlobMSBShort(image,ZipWithoutPrediction));
#endif
  return(WriteBlobMSBShort(image,Raw));
}
/*
  WritePSDChannel() exports one quantum channel of `next_image` to the blob
  of `image`, honoring next_image->compression (Raw, RLE or - when zlib is
  available - Zip). When `separate` is set the channel carries its own
  compression marker and `size_offset` is recomputed locally; otherwise
  `size_offset` points at the shared per-row length table reserved by the
  caller. Returns the number of bytes written, or 0 on allocation/deflate
  failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate)
{
int
y;
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
register const PixelPacket
*p;
register ssize_t
i;
size_t
count,
length;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
/* Channel has its own header: row-length table starts 2 bytes in. */
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,1);
}
if (next_image->depth > 8)
next_image->depth=16;
/* 1-bit data is stored inverted in PSD; flag it for the loop below. */
monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1)
? MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
ResetMagickMemory(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
if ((image_info->quality > 0 && image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,&image->exception);
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (next_image->compression == RLECompression)
{
/* Pack the row, then patch its length into the reserved table. */
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
count+=WriteBlob(image,length,compact_pixels);
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (next_image->compression == ZipCompression)
{
/* Feed the row to deflate; Z_FINISH on the last row drains the
stream completely. */
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) CHUNK;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) CHUNK-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
static unsigned char *AcquireCompactPixels(Image *image)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  /*
    Allocate the RLE scratch buffer for one scanline: 2-byte samples when
    the depth exceeds 8 bits, oversized so PackBits worst-case expansion
    cannot overflow it. Returns NULL (with exception set) on failure.
  */
  packet_size=1UL;
  if (image->depth > 8UL)
    packet_size=2UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
/*
  WritePSDChannels(): write the pixel data for every channel of
  next_image (indexed/gray, or RGB(A)/CMYK(A)) to the PSD blob, plus an
  optional opacity mask registered under the "psd:opacity-mask"
  artifact.  When separate != MagickFalse (layer data) each channel's
  byte count is patched back at size_offset; otherwise (merged
  composite) one compression marker covers all channels and rows_offset
  advances past the per-row offset table.  Returns bytes written, or 0
  on allocation failure.
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
MagickOffsetType size_offset,const MagickBooleanType separate)
{
Image
*mask;
MagickOffsetType
rows_offset;
size_t
channels,
count,
length,
offset_length;
unsigned char
*compact_pixels;
count=0;
offset_length=0;
rows_offset=0;
compact_pixels=(unsigned char *) NULL;
/* RLE needs a scratch buffer: packbits may expand a row */
if (next_image->compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(next_image);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
channels=1;
if (separate == MagickFalse)
{
/* merged composite: count channels and emit one compression marker */
if (next_image->storage_class != PseudoClass)
{
if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
channels=next_image->colorspace == CMYKColorspace ? 4 : 3;
if (next_image->matte != MagickFalse)
channels++;
}
rows_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,channels);
/* per-channel row-offset table entry size: 2 bytes (PSD) or 4 (PSB) */
offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
}
size_offset+=2;
if (next_image->storage_class == PseudoClass)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
IndexQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
GrayQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
/* PSD stores CMYK inverted: negate before writing, restore below */
if (next_image->colorspace == CMYKColorspace)
(void) NegateImage(next_image,MagickFalse);
length=WritePSDChannel(psd_info,image_info,image,next_image,
RedQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
GreenQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlueQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
if (next_image->colorspace == CMYKColorspace)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlackQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
if (next_image->matte != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
/* undo the CMYK negation applied above */
if (next_image->colorspace == CMYKColorspace)
(void) NegateImage(next_image,MagickFalse);
if (separate != MagickFalse)
{
const char
*property;
/* opacity mask (stored in the image registry) goes out as one extra
channel of the layer */
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
&image->exception);
if (mask != (Image *) NULL)
{
if (mask->compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(mask);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
length=WritePSDChannel(psd_info,image_info,image,mask,
RedQuantum,compact_pixels,rows_offset,MagickTrue);
(void) WritePSDSize(psd_info,image,length,size_offset);
count+=length;
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
}
}
}
return(count);
}
/*
  WritePascalString(): emit a Pascal-style string (a single length byte
  followed by at most 255 characters) and pad the total size, length
  byte included, up to a multiple of `padding` with zero bytes.
  Returns the number of bytes written to the blob.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    bytes_written,
    text_length;

  ssize_t
    pad;

  /* a Pascal string stores its length in one byte, so clamp at 255 */
  text_length=strlen(value);
  if (text_length > 255UL)
    text_length=255UL;
  bytes_written=0;
  if (text_length == 0)
    bytes_written+=WriteBlobByte(image,0);
  else
    {
      bytes_written+=WriteBlobByte(image,(unsigned char) text_length);
      bytes_written+=WriteBlob(image,text_length,(const unsigned char *)
        value);
    }
  text_length++;  /* account for the length byte itself */
  if ((text_length % padding) != 0)
    for (pad=0; pad < (ssize_t) (padding-(text_length % padding)); pad++)
      bytes_written+=WriteBlobByte(image,0);
  return(bytes_written);
}
/*
  WriteResolutionResourceBlock(): emit the 8BIM resolution image
  resource (id 0x03ED).  Resolutions are stored as 16.16 fixed-point
  pixels/inch; images declared in pixels/cm are converted (units=2
  tells readers the display unit is centimeters, units=1 inches).

  Fix: the original added a 0.5 rounding term twice — once when
  computing x/y_resolution and again at the cast below — so any
  resolution whose fixed-point value was exact got bumped up by one.
  Round exactly once, at the conversion to unsigned int.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      x_resolution=2.54*65536.0*image->x_resolution;
      y_resolution=2.54*65536.0*image->y_resolution;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->x_resolution;
      y_resolution=65536.0*image->y_resolution;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
/*
  WriteChannelSize(): write one layer-channel record: the signed channel
  identifier (-1 = alpha, -2 = user mask, 0..n = color planes) followed
  by a zero placeholder for the channel data size, which the caller
  patches in later.  Returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    bytes_written;

  bytes_written=WriteBlobMSBSignedShort(image,channel);
  bytes_written+=SetPSDSize(psd_info,image,0);
  return(bytes_written);
}
/*
  RemoveICCProfileFromResourceBlock(): scan the 8BIM resource block in
  bim_profile and excise the ICC profile resource (id 0x040F) in place,
  shrinking the StringInfo accordingly.  The ICC profile is written as
  its own resource later, so it must not also appear inside the 8BIM
  block.  At most one occurrence is removed (break after the first hit).
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
/* 16 bytes = smallest complete resource header we can parse safely */
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
/* q marks the start of the current resource record */
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
/* header: signature(4) + id(2) + name(2, assumed empty) + size(4) */
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
/* total record size: padded payload plus 12-byte header */
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
/* slide the remaining resources over the removed record */
if ((q+quantum < (datum+length-16)))
(void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
p+=count;
/* resource payloads are padded to an even byte count */
if ((count & 0x01) != 0)
p++;
}
}
/*
  RemoveResolutionFromResourceBlock(): excise the resolution resource
  (id 0x03ED) from the 8BIM block in place, since the writer emits its
  own resolution resource via WriteResolutionResourceBlock().  Same
  record-walking scheme as RemoveICCProfileFromResourceBlock(), with an
  extra PSDQuantum() sanity check to reject negative/overflowed sizes.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
/* too short to hold even one complete resource header */
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
/* header: signature(4) + id(2) + name(2, assumed empty) + size(4) */
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
((ssize_t) length-(cnt+12)-(q-datum)) > 0)
{
/* slide the tail of the block over the removed record */
(void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
p+=count;
/* payloads are padded to an even byte count */
if ((count & 0x01) != 0)
p++;
}
}
/*
  GetAdditionalInformation(): decide which "psd:additional-info" layer
  data to propagate to the output, controlled by the image option of
  the same name: "all" passes the stored profile through unchanged,
  "selective" keeps only whitelisted keys (filtering the profile in
  place), anything else drops the profile entirely.  Returns the
  profile to write, or NULL when nothing should be written.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
register size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
/* neither "all" nor "selective": discard the profile */
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
remaining_length=length;
length=0;
/* each record: 4-byte signature, 4-byte key, 4-byte size, payload */
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(*p++);
key[1]=(*p++);
key[2]=(*p++);
key[3]=(*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
/* truncated record: give up and write nothing */
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
/* not whitelisted: compact the buffer over this record */
if (remaining_length > 0)
p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length);
continue;
}
length+=(size_t) size+12;
p+=size;
}
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
/* truncate to the surviving (whitelisted) records only */
SetStringInfoLength(profile,(const size_t) length);
/* NOTE(review): re-registers `info` on the image right after removing
it as `profile` — presumably SetImageProfile clones, so the returned
truncated profile stays caller-owned; verify ownership semantics */
SetImageProfile(image,"psd:additional-info",info);
return(profile);
}
/*
  WritePSDImage(): encode `image` (plus any following frames in the
  list as layers) in Adobe Photoshop PSD/PSB format.  Writes, in order:
  the file header, colormap/mode data, the image resource section
  (resolution, sanitized 8BIM profile, ICC profile), the layer & mask
  information section (one record per frame), the per-layer channel
  data, and finally the merged composite image.  Returns MagickTrue on
  success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image)
{
char
layer_name[MaxTextExtent];
const char
*property;
const StringInfo
*icc_profile,
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
PSDInfo
psd_info;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
num_channels,
packet_size,
rounded_size,
size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->matte != MagickFalse)
packet_size+=image->depth > 8 ? 2 : 1;
/* version 2 selects the large-document PSB variant */
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
/* file header: signature, version, reserved bytes */
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,&image->exception) != MagickFalse))
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorMatteType) && (image->storage_class == PseudoClass))
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass);
if (image->colorspace != CMYKColorspace)
num_channels=(image->matte != MagickFalse ? 4UL : 3UL);
else
num_channels=(image->matte != MagickFalse ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsGrayImage(image,&image->exception) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsMonochromeImage(image,&image->exception) &&
(image->depth == 1) ? MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsGrayImage(image,&image->exception) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(
image->colormap[i].green));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/* strip resources we write ourselves from the caller's 8BIM block */
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
/* ICC profile as its own 8BIM resource (id 0x040F), even-padded */
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((MagickOffsetType) GetStringInfoLength(icc_profile) !=
PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
/* layers come from the frames after the first; a single-frame image
is written as one layer built from itself */
base_image=GetNextImageInList(image);
if (base_image == (Image *)NULL)
base_image=image;
size=0;
size_offset=TellBlob(image);
/* two zero placeholders: layer+mask section size and layer info size,
patched near the end of this function */
SetPSDSize(&psd_info,image,0);
SetPSDSize(&psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
/* NOTE(review): a negative layer count appears to flag that the first
alpha channel holds merged transparency — confirm against the PSD spec */
if (image->matte != MagickFalse)
size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
/* one layer record per frame: bounds, channel table, blend data,
optional mask record, name, and additional info */
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
&image->exception);
default_color=strlen(property) == 9 ? 255 : 0;
}
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
next_image->rows));
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
next_image->columns));
channels=1U;
if ((next_image->storage_class != PseudoClass) &&
(IsGrayImage(next_image,&next_image->exception) == MagickFalse))
channels=next_image->colorspace == CMYKColorspace ? 4U : 3U;
total_channels=channels;
if (next_image->matte != MagickFalse)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobMSBShort(image,total_channels);
/* remember where this layer's channel-size placeholders start so
WritePSDChannels() can patch them once sizes are known */
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(&psd_info,image,(signed short) i);
if (next_image->matte != MagickFalse)
size+=WriteChannelSize(&psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(&psd_info,image,-2);
size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
size+=WriteBlob(image,4,(const unsigned char *)
CompositeOperatorToPSDBlendMode(next_image->compose));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,
&image->exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ?
1 << 0x02 : 1); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image);
property=(const char *) GetImageProperty(next_image,"label");
if (property == (const char *) NULL)
{
(void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
/* extra-data length: padded name + mask record + additional info */
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobMSBLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobMSBLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,&image->exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobMSBLong(image,20);
size+=WriteBlobMSBSignedLong(image,mask->page.y);
size+=WriteBlobMSBSignedLong(image,mask->page.x);
size+=WriteBlobMSBSignedLong(image,(const signed int) mask->rows+
mask->page.y);
size+=WriteBlobMSBSignedLong(image,(const signed int) mask->columns+
mask->page.x);
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0);
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobMSBLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),
GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=WritePSDChannels(&psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
/*
Write the total size
*/
/* patch the two placeholders left earlier: section size, then the
layer-info size rounded up to an even byte count */
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
/* zip is only supported for layer data; fall back to RLE for the
merged composite */
compression=image->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,
MagickFalse) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
GB_unop__asin_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asin_fc64_fc64)
// op(A') function: GB (_unop_tran__asin_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = casin (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = casin (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = casin (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = casin (Ax [k]) for every (present) entry; the dense and
// bitmap cases differ only in the Ab presence test.  Cx and Ax may be
// aliased since each entry is read once and written once.
GrB_Info GB (_unop_apply__asin_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            GxB_FC64_t zk = Ax [k] ;
            Cx [k] = casin (zk) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            GxB_FC64_t zk = Ax [k] ;
            Cx [k] = casin (zk) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = casin (A') : the body is the shared transpose kernel, which picks
// up the GB_GETA / GB_CAST_OP / GB_OP macros defined earlier in this
// file.  Workspaces/A_slice partition A among the threads.
GrB_Info GB (_unop_tran__asin_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr57412.c | /* { dg-do compile } */
int thr;
#pragma omp threadprivate (thr)
/* Compiler regression test (presumably for PR 57412, per the file
   name): a parallel region combining copyin on a threadprivate
   variable with a reduction.  Compile-only, so the uninitialized
   reduction variable and the missing return value are intentional
   and must not be "fixed".  */
int foo ()
{
int l;
#pragma omp parallel copyin (thr) reduction (||:l)
;
}
|
private-clauseModificado2.c | /*
* private-clause.c
*
* Created on: 02/04/2014
* Author: Carlos de la Torre
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * Demonstrates the OpenMP private() clause: each thread accumulates a
 * partial sum of a[] in its own copy of `suma`.
 *
 * Fix: private() gives each thread an UNINITIALIZED copy — it does not
 * inherit the master's value (that would require firstprivate).  The
 * original read the indeterminate copy via `suma += a[i]`, which is
 * undefined behavior.  Zero the private copy before accumulating.
 */
int main() {
  int i, n = 7;
  int a[n];
  int suma = 0;

  for (i = 0; i < n; i++)
    a[i] = i;
  #pragma omp parallel private(suma)
  {
    suma = 0;  /* private copies start uninitialized; must reset here */
    #pragma omp for
    for (i = 0; i < n; i++) {
      suma += a[i];
      printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
    }
    /* each thread reports only its own partial sum */
    printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
  }
  printf("\n");
  return 0;
}
|
SectionsBeginLink.c | int x;
/* Minimal OpenMP sections construct: a single section whose body
   declares a local x shadowing the file-scope x.  NOTE(review):
   appears to be a compiler/tooling test case exercising scope
   handling at section boundaries — the shadowing is deliberate. */
int main() {
#pragma omp sections
{
#pragma omp section
{
int x;
}
}
}
|
common.h | #ifndef __COMMON_H__
#define __COMMON_H__
#include <string.h>
#include <string>
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <math.h>
#include <algorithm>
#include <list>
#include <vector>
#include <complex>
#include <unistd.h>
#include <iostream>
#include <limits.h>
#include <random>
#include <cfloat>
#include "../shared/model.h"
/**
* labels corresponding to symmetry of each tensor dimension
* NS = 0 - nonsymmetric
* SY = 1 - symmetric
* AS = 2 - antisymmetric
* SH = 3 - symmetric hollow
*/
//enum SYM : int { NS, SY, AS, SH };
/**
* labels corresponding to symmetry or structure of entire tensor
* NS = 0 - nonsymmetric
* SY = 1 - symmetric
* AS = 2 - antisymmetric
* SH = 3 - symmetric hollow
* SP = 4 - sparse
*/
enum STRUCTURE : int { NS, SY, AS, SH, SP };
typedef STRUCTURE SYM;
namespace CTF {
/**
* \addtogroup CTF
* @{
*/
extern int DGTOG_SWITCH;
/**
* \brief reduction types for tensor data
* deprecated types: OP_NORM1=OP_SUMABS, OP_NORM2=call norm2(), OP_NORM_INFTY=OP_MAXABS
*/
enum OP { OP_SUM, OP_SUMABS, OP_SUMSQ, OP_MAX, OP_MIN, OP_MAXABS, OP_MINABS};
// sets flops counters to 0
void initialize_flops_counter();
// get analytically estimated flops, which are effectual flops in dense case, but estimates based on aggregate nonzero density for sparse case
int64_t get_estimated_flops();
/**
* @}
*/
}
namespace CTF_int {
/**
* \brief initialized random number generator
* \param[in] rank processor index
*/
void init_rng(int rank);
/**
* \brief returns new random number in [0,1)
*/
double get_rand48();
void handler();
#define IASSERT(...) \
do { if (!(__VA_ARGS__)){ int rank; MPI_Comm_rank(MPI_COMM_WORLD,&rank); if (rank == 0){ printf("CTF ERROR: %s:%d, ASSERT(%s) failed\n",__FILE__,__LINE__,#__VA_ARGS__); } CTF_int::handler(); assert(__VA_ARGS__); } } while (0)
/**
* \brief computes the size of a tensor in SY (NOT HOLLOW) packed symmetric layout
* \param[in] order tensor dimension
* \param[in] len tensor edge lengths
* \param[in] sym tensor symmetries
* \return size of tensor in packed layout
*/
int64_t sy_packed_size(int order, const int64_t * len, const int* sym);
/**
* \brief computes the size of a tensor in packed symmetric (SY, SH, or AS) layout
* \param[in] order tensor dimension
* \param[in] len tensor edge lengths
* \param[in] sym tensor symmetries
* \return size of tensor in packed layout
*/
int64_t packed_size(int order, const int64_t * len, const int* sym);
enum { SUCCESS, ERROR, NEGATIVE };
template <typename type=char>
int conv_idx(int order,
type const * cidx,
int ** iidx);
template <typename type=char>
int conv_idx(int order_A,
type const * cidx_A,
int ** iidx_A,
int order_B,
type const * cidx_B,
int ** iidx_B);
template <typename type=char>
int conv_idx(int order_A,
type const * cidx_A,
int ** iidx_A,
int order_B,
type const * cidx_B,
int ** iidx_B,
int order_C,
type const * cidx_C,
int ** iidx_C);
int64_t * conv_to_int64(int const * arr, int len);
int * conv_to_int(int64_t const * arr, int len);
int64_t * copy_int64(int64_t const * arr, int len);
// accumulates computed flops (targeted for internal use)
void add_computed_flops(int64_t n);
// get computed flops
int64_t get_computed_flops();
// accumulates computed flops (targeted for internal use)
void add_estimated_flops(int64_t n);
/** \brief wrapper around an MPI communicator, tracking liveness and
 *         ownership so communicators are freed exactly once; also
 *         exposes collectives with 64-bit counts and cost estimates */
class CommData {
public:
MPI_Comm cm;      // underlying MPI communicator
int np;           // number of processes in this communicator
int rank;         // rank of this process within the communicator
int color;        // identifier of this comm within its parent
int alive;        // nonzero iff cm is an active (usable) communicator
int created;      // nonzero iff this object owns cm and must free it
CommData();
~CommData();
/** \brief copy constructor sets created to zero */
CommData(CommData const & other);
CommData& operator=(CommData const & other);
/**
* \brief create active communicator wrapper
* \param[in] cm MPI_Comm defining this wrapper
*/
CommData(MPI_Comm cm);
/**
* \brief create non-active communicator wrapper
* \param[in] rank rank within this comm
* \param[in] color identifier of comm within parent
* \param[in] np number of processors within this comm
*/
CommData(int rank, int color, int np);
/**
* \brief create active subcomm from parent comm which must be active
* \param[in] rank processor rank within subcomm
* \param[in] color identifier of subcomm within this comm
* \param[in] parent comm to split
*/
CommData(int rank, int color, CommData parent);
/**
* \brief activate this subcommunicator by splitting parent_comm
* \param[in] parent communicator to split
*/
void activate(MPI_Comm parent);
/* \brief deactivate (MPI_Free) this comm */
void deactivate();
/* \brief provide estimate of broadcast execution time */
double estimate_bcast_time(int64_t msg_sz);
/* \brief provide estimate of allreduction execution time */
double estimate_allred_time(int64_t msg_sz, MPI_Op op);
/* \brief provide estimate of reduction execution time */
double estimate_red_time(int64_t msg_sz, MPI_Op op);
/* \brief provide estimate of sparse reduction execution time */
// double estimate_csrred_time(int64_t msg_sz, MPI_Op op);
/* \brief provide estimate of all_to_all execution time */
double estimate_alltoall_time(int64_t chunk_sz);
/* \brief provide estimate of all_to_all_v execution time */
double estimate_alltoallv_time(int64_t tot_sz);
/**
* \brief broadcast, same interface as MPI_Bcast, but excluding the comm
*/
void bcast(void * buf, int64_t count, MPI_Datatype mdtype, int root);
/**
* \brief allreduce, same interface as MPI_Allreduce, but excluding the comm
*/
void allred(void * inbuf, void * outbuf, int64_t count, MPI_Datatype mdtype, MPI_Op op);
/**
* \brief reduce, same interface as MPI_Reduce, but excluding the comm
*/
void red(void * inbuf, void * outbuf, int64_t count, MPI_Datatype mdtype, MPI_Op op, int root);
/**
* \brief performs all-to-all-v with 64-bit integer counts and offset on arbitrary
* length types (datum_size), and uses point-to-point when all-to-all-v sparse
* \param[in] send_buffer data to send
* \param[in] send_counts number of datums to send to each process
* \param[in] send_displs displacements of datum sets in sen_buffer
* \param[in] datum_size size of MPI_datatype to use
* \param[in,out] recv_buffer data to recv
* \param[in] recv_counts number of datums to recv to each process
* \param[in] recv_displs displacements of datum sets in sen_buffer
*/
void all_to_allv(void * send_buffer,
int64_t const * send_counts,
int64_t const * send_displs,
int64_t datum_size,
void * recv_buffer,
int64_t const * recv_counts,
int64_t const * recv_displs);
};
using ipair = std::pair<int,int>;
/** \brief maps a user-supplied 2D grid of MPI ranks onto nodes,
 *         factoring the rank grid into an inter-node grid and an
 *         intra-node grid */
struct CommGrid {
CommGrid(){};
~CommGrid(){};
CommGrid(ipair _rGrid, int _nNodes);
int nRanks;                    // total number of ranks in the grid
std::vector<ipair> colorKey;   // per-rank (color,key) used for comm splits
ipair rGrid; // RankGrid: given by the user
ipair nGrid; // NodeGrid: output, grid of nodes
ipair iGrid; // intraNodeGrid: the ranks of one node possess this grid
ipair getNodeGrid(int nNodes, ipair rGrid);
// prime factorization helper for grid construction
std::vector<int> factorize(int number);
// pick a near-square 2D factorization from the given factors
ipair getSquare(int id, std::vector<int> factors);
};
int alloc_ptr(int64_t len, void ** const ptr);
int mst_alloc_ptr(int64_t len, void ** const ptr);
void * alloc(int64_t len);
void * mst_alloc(int64_t len);
int cdealloc(void * ptr);
void memprof_dealloc(void * ptr);
char * get_default_inds(int order, int start_index=0);
void cvrt_idx(int order,
int64_t const * lens,
int64_t idx,
int64_t ** idx_arr);
void cvrt_idx(int order,
int64_t const * lens,
int64_t idx,
int64_t * idx_arr);
void cvrt_idx(int order,
int64_t const * lens,
int64_t const * idx_arr,
int64_t * idx);
/**
* \brief gives a datatype for arbitrary datum_size, errors if exceeding 32-bits
*
* \param[in] count number of elements we want to communicate
* \param[in] datum_size element size
* \param[in] dt new datatype to pass to MPI routine
* \return whether the datatype is custom and needs to be freed
*/
bool get_mpi_dt(int64_t count, int64_t datum_size, MPI_Datatype & dt);
/**
* \brief compute in-place inclusive (postfix) prefix sum over strided elements
* \param[in] n integer length of array
* \param[in] stride spacing between scanned elements
* \param[in,out] B array of input data; on output B[i*stride] = sum_{j=0}^{i} B_in[j*stride]
*/
template <typename dtype>
void parallel_postfix(int64_t n, int64_t stride, dtype * B){
  // Number of strided elements participating at this level.
  int64_t num_elems = (n+stride-1)/stride;
  if (num_elems <= 2){
    // Base case: with two elements the scan is a single accumulation.
    if (num_elems == 2)
      B[stride] += B[0];
    return;
  }
  int64_t dbl_stride = 2*stride;
  // Up-sweep: every odd-position element absorbs its left neighbor.
#ifdef _OPENMP
  #pragma omp parallel for
#endif
  for (int64_t idx=stride; idx<n; idx+=dbl_stride)
    B[idx] += B[idx-stride];
  // Recursively scan the odd positions at doubled stride.
  parallel_postfix(n-stride, dbl_stride, B+stride);
  // Down-sweep: propagate scanned totals into the even positions that follow.
#ifdef _OPENMP
  #pragma omp parallel for
#endif
  for (int64_t idx=stride; idx<n-stride; idx+=dbl_stride)
    B[idx+stride] += B[idx];
}
/**
 * \brief compute in-place exclusive prefix sum over strided elements
 * \param[in] n integer length of array
 * \param[in] stride spacing between participating elements
 * \param[in,out] B data array; on output B[i*stride] = sum_{j=0}^{i-1} B_in[j*stride]
 */
template <typename dtype>
void parallel_prefix(int64_t n, int64_t stride, dtype * B){
  if (n/stride < 2){
    // Base case: at most two strided elements remain.
    if ((n-1)/stride >= 1)
      B[stride] = B[0];  // shift the lone partial sum to the right
    B[0] = 0.;           // an exclusive scan always starts at zero
    return;
  }
  int64_t dbl_stride = 2*stride;
  // Up-sweep: combine adjacent pairs into the odd positions.
#ifdef _OPENMP
  #pragma omp parallel for
#endif
  for (int64_t idx=stride; idx<n; idx+=dbl_stride)
    B[idx] += B[idx-stride];
  int64_t cnt = (n+stride-1)/stride;  // strided element count at this level
  bool odd_tail = (cnt % 2 != 0);
  // An odd count leaves a dangling last element; seed it with its
  // neighbor's pair-sum before recursing, then fold the scanned value in.
  if (odd_tail)
    B[(cnt-1)*stride] = B[(cnt-2)*stride];
  parallel_prefix(n-stride, dbl_stride, B+stride);
  if (odd_tail)
    B[(cnt-1)*stride] += B[(cnt-2)*stride];
  // Down-sweep: swap scanned values down and accumulate upward.
#ifdef _OPENMP
  #pragma omp parallel for
#endif
  for (int64_t idx=stride; idx<n; idx+=dbl_stride){
    dtype tmp = B[idx-stride];
    B[idx-stride] = B[idx];
    B[idx] += tmp;
  }
}
/**
* \brief compute prefix sum
* \param[in] n integer length of array
* \param[in] A array of input data of size n
* \param[in,out] B initially zero array of size n, on output B[i] = sum_{j=0}^i-1 A[j]
*/
template <typename dtype>
void prefix(int64_t n, dtype const * A, dtype * B){
  // Copy the input, then scan in place via parallel_prefix.
  // Guard the pragma with _OPENMP for consistency with the other scan
  // routines in this header (non-OpenMP builds otherwise warn on the
  // unknown pragma).
#ifdef _OPENMP
  #pragma omp parallel for
#endif
  for (int64_t i=0; i<n; i++){
    B[i] = A[i];
  }
  CTF_int::parallel_prefix<dtype>(n, 1, B);
}
}
#endif
|
cpl_fft-test.c | /*
* This file is part of the ESO Common Pipeline Library
* Copyright (C) 2001-2017 European Southern Observatory
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
/*-----------------------------------------------------------------------------
Includes
-----------------------------------------------------------------------------*/
#include "cpl_fft.h"
#include "cpl_test.h"
#include "cpl_image_io_impl.h"
/*-----------------------------------------------------------------------------
Defines
-----------------------------------------------------------------------------*/
#ifndef IMAGESZ
#define IMAGESZ 10
#endif
#ifndef IMAGENZ
#define IMAGENZ 5
#endif
#ifndef CONSTANT
#define CONSTANT 200
#endif
/*----------------------------------------------------------------------------*/
/**
* @defgroup cpl_fft_test Unit tests of the CPL FFT functions
*/
/*----------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------
Private Function prototypes
-----------------------------------------------------------------------------*/
static void cpl_fft_image_test(void);
#if defined CPL_FFTWF_INSTALLED || defined CPL_FFTW_INSTALLED
static void cpl_fft_image_test_one(cpl_size, cpl_size, cpl_type);
static void cpl_fft_imagelist_test_one(cpl_size, cpl_size, cpl_size, cpl_type);
static
void cpl_fft_imagelist_test_image(cpl_size, cpl_size, cpl_size, cpl_type);
static void cpl_fft_image_test_correlate(cpl_size, cpl_size, cpl_type);
#endif
/*----------------------------------------------------------------------------*/
/**
@brief Unit tests of cpl_fft module
**/
/*----------------------------------------------------------------------------*/
int main(void)
{
    /* The two floating-point pixel types exercised by the FFT tests */
    const cpl_type imtypes[] = {CPL_TYPE_DOUBLE, CPL_TYPE_FLOAT};
    cpl_boolean do_bench;
    int i;
    cpl_test_init(PACKAGE_BUGREPORT, CPL_MSG_WARNING);
    /* Larger, benchmark-sized runs only at INFO verbosity or lower */
    do_bench = cpl_msg_get_level() <= CPL_MSG_INFO;
    /* Insert tests below */
    /* NOTE(review): the float and double batches run concurrently here.
       FFTW's planner is not thread-safe by itself; presumably the CPL
       FFT layer serializes planner access internally — confirm before
       relying on this scheduling. */
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
    for (i = 0; i < 2; i++) {
        /* 10x more planes per imagelist in benchmark mode */
        const cpl_size mz = do_bench ? 10 * IMAGENZ : IMAGENZ;
        cpl_size nz;
        /* Collect wisdom */
#ifdef CPL_FFTWF_INSTALLED
        if (imtypes[i] == CPL_TYPE_FLOAT) {
            /* Single-image transforms over assorted shapes, incl. 1-pixel
               wide/tall edge cases */
            cpl_fft_image_test_one( 16, 16, imtypes[i]);
            cpl_fft_image_test_one( 4, 32, imtypes[i]);
            cpl_fft_image_test_one( 4, 4, imtypes[i]);
            cpl_fft_image_test_one( 2, 128, imtypes[i]);
            cpl_fft_image_test_one(128, 2, imtypes[i]);
            cpl_fft_image_test_one(128, 1, imtypes[i]);
            cpl_fft_image_test_one( 1, 128, imtypes[i]);
            cpl_fft_image_test_correlate( 16, 16, imtypes[i]);
            cpl_fft_image_test_correlate( 64, 128, imtypes[i]);
            cpl_fft_image_test_correlate(128, 64, imtypes[i]);
            cpl_fft_image_test_correlate(128, 128, imtypes[i]);
            if (do_bench) {
                cpl_fft_image_test_one(256, 256, imtypes[i]);
                cpl_fft_image_test_correlate(512, 512, imtypes[i]);
            }
        }
#endif
#ifdef CPL_FFTW_INSTALLED
        if (imtypes[i] == CPL_TYPE_DOUBLE) {
            cpl_fft_image_test_one( 16, 16, imtypes[i]);
            cpl_fft_image_test_one( 32, 4, imtypes[i]);
            cpl_fft_image_test_one( 4, 4, imtypes[i]);
            cpl_fft_image_test_one( 2, 128, imtypes[i]);
            cpl_fft_image_test_one(128, 2, imtypes[i]);
            cpl_fft_image_test_one(128, 1, imtypes[i]);
            cpl_fft_image_test_one( 1, 128, imtypes[i]);
            cpl_fft_image_test_correlate( 16, 16, imtypes[i]);
            cpl_fft_image_test_correlate( 64, 128, imtypes[i]);
            cpl_fft_image_test_correlate(128, 64, imtypes[i]);
            cpl_fft_image_test_correlate(128, 128, imtypes[i]);
            if (do_bench) {
                cpl_fft_image_test_one(256, 256, imtypes[i]);
                cpl_fft_image_test_correlate(512, 512, imtypes[i]);
            }
        }
#endif
        /* nz takes exactly two values: 1 (single plane) and 1 + mz (stack) */
        for (nz = 1; nz <= 1 + mz; nz+= mz) {
#ifdef CPL_FFTWF_INSTALLED
            if (imtypes[i] == CPL_TYPE_FLOAT) {
                cpl_fft_imagelist_test_image( 16, 16, nz, imtypes[i]);
                cpl_fft_imagelist_test_image( 4, 32, nz, imtypes[i]);
                cpl_fft_imagelist_test_image( 4, 4, nz, imtypes[i]);
                cpl_fft_imagelist_test_image( 2, 128, nz, imtypes[i]);
                cpl_fft_imagelist_test_image(128, 2, nz, imtypes[i]);
                cpl_fft_imagelist_test_image(128, 1, nz, imtypes[i]);
                cpl_fft_imagelist_test_image( 1, 128, nz, imtypes[i]);
                if (do_bench) {
                    cpl_fft_imagelist_test_image(256, 256, nz, imtypes[i]);
                }
            }
#endif
#ifdef CPL_FFTW_INSTALLED
            if (imtypes[i] == CPL_TYPE_DOUBLE) {
                cpl_fft_imagelist_test_image( 16, 16, nz, imtypes[i]);
                cpl_fft_imagelist_test_image( 32, 4, nz, imtypes[i]);
                cpl_fft_imagelist_test_image( 4, 4, nz, imtypes[i]);
                cpl_fft_imagelist_test_image( 2, 128, nz, imtypes[i]);
                cpl_fft_imagelist_test_image(128, 2, nz, imtypes[i]);
                cpl_fft_imagelist_test_image(128, 1, nz, imtypes[i]);
                cpl_fft_imagelist_test_image( 1, 128, nz, imtypes[i]);
                if (do_bench) {
                    cpl_fft_imagelist_test_image(256, 256, nz, imtypes[i]);
                }
            }
#endif
        }
    }
    /* Error-path and type-mismatch coverage (runs even without FFTW) */
    cpl_fft_image_test();
    /* End of tests */
    return cpl_test_end(0);
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Unit tests of the function
@see cpl_fft_image()
**/
/*----------------------------------------------------------------------------*/
static void cpl_fft_image_test(void)
{
    /* All pixel types, incl. the unsupported integer type and the two
       complex types, to exercise every error path */
    const cpl_type imtypes[] = {CPL_TYPE_DOUBLE, CPL_TYPE_FLOAT,
                                CPL_TYPE_INT, CPL_TYPE_DOUBLE_COMPLEX,
                                CPL_TYPE_FLOAT_COMPLEX};
    int ityp;
    int nok = 0; /* Number of successful calls */
    /* Insert tests below */
    /* Iterate through all pixel types */
    for (ityp = 0; ityp < (int)(sizeof(imtypes)/sizeof(imtypes[0])); ityp++) {
        const cpl_type imtype = imtypes[ityp];
        int ityp2;
        cpl_image * img1 = cpl_image_new(IMAGESZ, IMAGESZ, imtype);
        cpl_image * img3 = cpl_image_new(IMAGESZ, IMAGESZ, imtype);
        cpl_error_code error;
        /* Various error checks */
        /* NULL input / output must fail cleanly */
        error = cpl_fft_image(img3, NULL, CPL_FFT_FORWARD);
        cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT);
        error = cpl_fft_image(NULL, img3, CPL_FFT_FORWARD);
        cpl_test_eq_error(error, CPL_ERROR_NULL_INPUT);
        /* FORWARD and BACKWARD are mutually exclusive; the expected error
           code depends on the pixel type and on which FFTW flavors are
           built in */
        error = cpl_fft_image(img3, img3, CPL_FFT_FORWARD | CPL_FFT_BACKWARD);
        if (imtype & CPL_TYPE_COMPLEX) {
            if (imtype & CPL_TYPE_DOUBLE) {
#ifdef CPL_FFTW_INSTALLED
                cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
#else
                cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE);
#endif
            } else if (imtype & CPL_TYPE_FLOAT) {
#ifdef CPL_FFTWF_INSTALLED
                cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
#else
                cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE);
#endif
            } else {
                cpl_test_eq_error(error, CPL_ERROR_ILLEGAL_INPUT);
            }
        } else if (imtype == CPL_TYPE_DOUBLE) {
#ifdef CPL_FFTW_INSTALLED
            cpl_test_eq_error(error, CPL_ERROR_TYPE_MISMATCH);
#else
            cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE);
#endif
        } else if (imtype == CPL_TYPE_FLOAT) {
#ifdef CPL_FFTWF_INSTALLED
            cpl_test_eq_error(error, CPL_ERROR_TYPE_MISMATCH);
#else
            cpl_test_eq_error(error, CPL_ERROR_UNSUPPORTED_MODE);
#endif
        } else {
            cpl_test_eq_error(error, CPL_ERROR_TYPE_MISMATCH);
        }
        /* Noise can only be filled into non-complex images */
        if (!(imtype & CPL_TYPE_COMPLEX)) {
            error = cpl_image_fill_noise_uniform(img1, 0, CONSTANT);
            cpl_test_eq_error(error, CPL_ERROR_NONE);
        }
        /* Pair each input type with every output type */
        for (ityp2 = 0; ityp2 < (int)(sizeof(imtypes)/sizeof(imtypes[0]));
             ityp2++) {
            const cpl_type imtype2 = imtypes[ityp2];
            cpl_image * img2 = cpl_image_new(IMAGESZ, IMAGESZ, imtype2);
            const cpl_image * imgin = img3;
            cpl_image * imgout = img2;
            int idir;
            /* No scaling on the forward transform has no effect */
            unsigned mode = CPL_FFT_FORWARD | CPL_FFT_NOSCALE;
            error = cpl_image_copy(img3, img1, 1, 1);
            cpl_test_eq_error(error, CPL_ERROR_NONE);
            /* Transform first forward, then backward */
            /* Those two iterations will succeed iff the input image
               and output image have matching non-integer precision */
            for (idir = 0; idir < 2; idir++, mode = CPL_FFT_BACKWARD,
                     imgin = img2, imgout = img3) {
                error = cpl_fft_image(imgout, imgin, mode);
                /* real float -> complex float: supported iff FFTWF built */
                if (cpl_image_get_type(img3) == CPL_TYPE_FLOAT &&
                    cpl_image_get_type(img2) ==
                    (CPL_TYPE_FLOAT | CPL_TYPE_COMPLEX)) {
#ifdef CPL_FFTWF_INSTALLED
                    cpl_test_eq_error(CPL_ERROR_NONE, error);
                    nok++;
                    if (mode == CPL_FFT_BACKWARD) {
                        /* Transformed forward and backwards, so the result
                           should equal the original input */
                        cpl_test_image_abs(img1, img3,
                                           3.0 * FLT_EPSILON * CONSTANT);
                    }
#else
                    cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error);
#endif
                /* real double -> complex double: supported iff FFTW built */
                } else if (cpl_image_get_type(img3) == CPL_TYPE_DOUBLE &&
                           cpl_image_get_type(img2) ==
                           (CPL_TYPE_DOUBLE | CPL_TYPE_COMPLEX)) {
#ifdef CPL_FFTW_INSTALLED
                    cpl_test_eq_error(CPL_ERROR_NONE, error);
                    nok++;
                    if (mode == CPL_FFT_BACKWARD) {
                        /* Transformed forward and backwards, so the result
                           should equal the original input */
                        cpl_test_image_abs(img1, img3,
                                           5.0 * DBL_EPSILON * CONSTANT);
                    }
#else
                    cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error);
#endif
                /* complex double -> complex double */
                } else if (cpl_image_get_type(img3) ==
                           (CPL_TYPE_DOUBLE | CPL_TYPE_COMPLEX) &&
                           cpl_image_get_type(img2) ==
                           (CPL_TYPE_DOUBLE | CPL_TYPE_COMPLEX)) {
#ifdef CPL_FFTW_INSTALLED
                    cpl_test_eq_error(CPL_ERROR_NONE, error);
#else
                    cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error);
#endif
                /* complex float -> complex float */
                } else if (cpl_image_get_type(img3) ==
                           (CPL_TYPE_FLOAT | CPL_TYPE_COMPLEX) &&
                           cpl_image_get_type(img2) ==
                           (CPL_TYPE_FLOAT | CPL_TYPE_COMPLEX)) {
#ifdef CPL_FFTWF_INSTALLED
                    cpl_test_eq_error(CPL_ERROR_NONE, error);
#else
                    cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error);
#endif
                /* Integer pixels are never transformable */
                } else if ((imtype & CPL_TYPE_INT) ||
                           (imtype2 & CPL_TYPE_INT)) {
                    cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error);
                /* Mixed float/double precision is rejected */
                } else if ((imtype & (CPL_TYPE_FLOAT | CPL_TYPE_DOUBLE | CPL_TYPE_INT)) !=
                           (imtype2 & (CPL_TYPE_FLOAT | CPL_TYPE_DOUBLE | CPL_TYPE_INT))) {
                    cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error);
                } else if (!((imtype & CPL_TYPE_COMPLEX) ^
                             (imtype2 & CPL_TYPE_COMPLEX))) {
                    /* None or both are complex */
                    if (imtype == CPL_TYPE_DOUBLE) {
#ifdef CPL_FFTW_INSTALLED
                        cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error);
#else
                        cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error);
#endif
                    } else if (imtype == CPL_TYPE_FLOAT) {
#ifdef CPL_FFTWF_INSTALLED
                        cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error);
#else
                        cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error);
#endif
                    } else {
                        cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error);
                    }
                } else if (imtype & CPL_TYPE_DOUBLE) {
#ifdef CPL_FFTW_INSTALLED
                    cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error);
#else
                    cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error);
#endif
                } else if (imtype & CPL_TYPE_FLOAT) {
#ifdef CPL_FFTWF_INSTALLED
                    cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error);
#else
                    cpl_test_eq_error(CPL_ERROR_UNSUPPORTED_MODE, error);
#endif
                } else {
                    cpl_test_eq_error(CPL_ERROR_TYPE_MISMATCH, error);
                }
            }
            cpl_image_delete(img2);
        }
        cpl_image_delete(img1);
        cpl_image_delete(img3);
    }
    /* nok counts only the float/double real<->complex round trips */
#if defined CPL_FFTWF_INSTALLED && defined CPL_FFTW_INSTALLED
    cpl_test_eq(nok, 4); /* Forward and backward of float and double */
#elif defined CPL_FFTWF_INSTALLED
    cpl_msg_warning(cpl_func, "Double precision FFT not available for "
                    "unit testing");
    cpl_test_eq(nok, 2); /* Forward and backward of type float */
#elif defined CPL_FFTW_INSTALLED
    cpl_msg_warning(cpl_func, "Single precision FFT not available for "
                    "unit testing");
    cpl_test_eq(nok, 2); /* Forward and backward of type double */
#else
    cpl_msg_warning(cpl_func, "FFT not available for unit testing");
    cpl_test_zero(nok);
#endif
}
#if defined CPL_FFTWF_INSTALLED || defined CPL_FFTW_INSTALLED
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Unit tests of the function
@param nx Size in x (the number of columns)
@param ny Size in y (the number of rows)
@param type One of CPL_TYPE_DOUBLE or CPL_TYPE_FLOAT
@see cpl_fft_image()
**/
/*----------------------------------------------------------------------------*/
static void cpl_fft_image_test_one(cpl_size nx, cpl_size ny, cpl_type type)
{
    const int rigor = CPL_FFT_FIND_MEASURE;
    /* image5 was allocated and deleted without ever being used; removed */
    cpl_image * image1r = cpl_image_new(nx, ny, type);
    cpl_image * image1c;
    cpl_image * image2 = cpl_image_new(nx, ny, type);
    cpl_image * image3r = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX);
    cpl_image * image3c = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX);
    /* r2c output needs only nx/2+1 columns (Hermitian symmetry) */
    cpl_image * image3h = cpl_image_new(nx/2+1, ny, type | CPL_TYPE_COMPLEX);
    cpl_image * image4;
    cpl_image * image4r;
    cpl_image * image4c;
    cpl_error_code error;
    error = cpl_image_fill_noise_uniform(image1r, 0.0, 1.0);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    image1c = cpl_image_cast(image1r, type | CPL_TYPE_COMPLEX);
    /* Real-to-complex, both full size */
    error = cpl_fft_image(image3r, image1r, CPL_FFT_FORWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* Extract half of r2c transform */
    image4 = cpl_image_extract(image3r, 1, 1, nx/2 + 1, ny);
    /* Real-to-complex, complex is half size */
    error = cpl_fft_image(image3h, image1r, CPL_FFT_FORWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* That half has to match the transform onto the half-sized image */
    cpl_test_image_abs(image3h, image4, 80.0 * (type == CPL_TYPE_DOUBLE ?
                                                DBL_EPSILON : FLT_EPSILON));
    /* Complex-to-complex of same real values */
    error = cpl_fft_image(image3c, image1c, CPL_FFT_FORWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* In-place complex-to-complex of same real values */
    error = cpl_fft_image(image1c, image1c, CPL_FFT_FORWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    cpl_test_image_abs(image3c, image1c, type == CPL_TYPE_DOUBLE ?
                       128.0 * DBL_EPSILON : 40.0 * FLT_EPSILON);
    /* Extract half of c2c transform */
    cpl_image_delete(image4);
    image4 = cpl_image_extract(image3c, 1, 1, nx/2 + 1, ny);
    cpl_test_image_abs(image3h, image4, 128.0 * nx *
                       (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON));
    /* Complex-to-real, both full size */
    error = cpl_fft_image(image2, image3r, CPL_FFT_BACKWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* The back-transformed must match the original image */
    cpl_test_image_abs(image1r, image2, 6.0 * (type == CPL_TYPE_DOUBLE ?
                                               DBL_EPSILON : FLT_EPSILON));
    /* Complex-to-real, complex is half size */
    error = cpl_fft_image(image2, image3h, CPL_FFT_BACKWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* The back-transformed must match the original image */
    cpl_test_image_abs(image1r, image2, 6.0 * (type == CPL_TYPE_DOUBLE ?
                                               DBL_EPSILON : FLT_EPSILON));
    /* Complex-to-complex of same real values */
    error = cpl_fft_image(image3r, image3c, CPL_FFT_BACKWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* In-place complex-to-complex of same real values */
    error = cpl_fft_image(image3c, image3c, CPL_FFT_BACKWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    cpl_test_image_abs(image3r, image3c, 3.2 * (type == CPL_TYPE_DOUBLE ?
                                                DBL_EPSILON : FLT_EPSILON));
    /* The back-transformed must match the original image - on the real part */
    image4r = cpl_image_extract_real(image3r);
    cpl_test_image_abs(image1r, image4r, 6.0 * (type == CPL_TYPE_DOUBLE ?
                                                DBL_EPSILON : FLT_EPSILON));
    /* The back-transformed must have a zero-valued imaginary part */
    image4c = cpl_image_extract_imag(image3r);
    cpl_image_delete(image4);
    /* A freshly created image is zero-valued: compare imag part against it */
    image4 = cpl_image_new(nx, ny, type);
    cpl_test_image_abs(image4c, image4, 2.0 * (type == CPL_TYPE_DOUBLE ?
                                               DBL_EPSILON : FLT_EPSILON));
    cpl_image_delete(image1r);
    cpl_image_delete(image1c);
    cpl_image_delete(image2);
    cpl_image_delete(image3r);
    cpl_image_delete(image3c);
    cpl_image_delete(image3h);
    cpl_image_delete(image4);
    cpl_image_delete(image4r);
    cpl_image_delete(image4c);
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Unit tests of the function
@param nx Size in x (the number of columns)
@param ny Size in y (the number of rows)
@param nz Size in z (the number of planes/images)
@param type One of CPL_TYPE_DOUBLE or CPL_TYPE_FLOAT
@see cpl_fft_image()
**/
/*----------------------------------------------------------------------------*/
static void cpl_fft_imagelist_test_one(cpl_size nx, cpl_size ny, cpl_size nz,
                                       cpl_type type)
{
    const int rigor = CPL_FFT_FIND_MEASURE;
    /* ilistr and ilistc were allocated and deleted but never used; removed */
    cpl_imagelist * ilist1r = cpl_imagelist_new();
    cpl_imagelist * ilist1c = cpl_imagelist_new();
    cpl_imagelist * ilist2 = cpl_imagelist_new();
    cpl_imagelist * ilist3r = cpl_imagelist_new();
    cpl_imagelist * ilist3c = cpl_imagelist_new();
    cpl_imagelist * ilist3h = cpl_imagelist_new();
    cpl_imagelist * ilist4 = cpl_imagelist_new();
    cpl_imagelist * ilist4r = cpl_imagelist_new();
    cpl_imagelist * ilist4c = cpl_imagelist_new();
    cpl_imagelist * ilist5 = cpl_imagelist_new();
    cpl_error_code error;
    cpl_size i;
    /* Build nz noise planes plus matching work lists; the lists take
       ownership of the images */
    for (i = 0; i < nz; i++) {
        cpl_image * image1r = cpl_image_new(nx, ny, type);
        cpl_image * image1c;
        cpl_image * image2 = cpl_image_new(nx, ny, type);
        cpl_image * image3r = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX);
        cpl_image * image3c;
        cpl_image * image3h = cpl_image_new(nx/2+1, ny, type
                                            | CPL_TYPE_COMPLEX);
        cpl_image * image5 = cpl_image_new(nx, ny, type);
        error = cpl_image_fill_noise_uniform(image1r, 0.0, 1.0);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_imagelist_set(ilist1r, image1r, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        image1c = cpl_image_cast(image1r, type | CPL_TYPE_COMPLEX);
        error = cpl_imagelist_set(ilist1c, image1c, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_imagelist_set(ilist2 , image2, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_imagelist_set(ilist3r, image3r, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        image3c = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX);
        error = cpl_imagelist_set(ilist3c, image3c, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_imagelist_set(ilist3h, image3h, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_imagelist_set(ilist5, image5, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
    }
    /* Real-to-complex, both full size */
    error = cpl_fft_imagelist(ilist3r, ilist1r, CPL_FFT_FORWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* Extract half of r2c transform */
    for (i = 0; i < nz; i++) {
        const cpl_image * image3r = cpl_imagelist_get_const(ilist3r, i);
        cpl_image       * image4  = cpl_image_extract(image3r, 1, 1,
                                                      nx/2 + 1, ny);
        error = cpl_imagelist_set(ilist4, image4, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
    }
    /* Real-to-complex, complex is half size */
    error = cpl_fft_imagelist(ilist3h, ilist1r, CPL_FFT_FORWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* That half has to match the transform onto the half-sized image */
    cpl_test_imagelist_abs(ilist3h, ilist4, 80.0 * (type == CPL_TYPE_DOUBLE ?
                                                    DBL_EPSILON : FLT_EPSILON));
    /* Complex-to-complex of same real values */
    error = cpl_fft_imagelist(ilist3c, ilist1c, CPL_FFT_FORWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* In-place complex-to-complex of same real values */
    error = cpl_fft_imagelist(ilist1c, ilist1c, CPL_FFT_FORWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    cpl_test_imagelist_abs(ilist3c, ilist1c, 2.0 * (nx + ny) *
                           (type == CPL_TYPE_DOUBLE ?
                            DBL_EPSILON : FLT_EPSILON));
    /* Extract half of c2c transform */
    cpl_imagelist_empty(ilist4);
    for (i = 0; i < nz; i++) {
        const cpl_image * image3c = cpl_imagelist_get_const(ilist3c, i);
        cpl_image       * image4  = cpl_image_extract(image3c, 1, 1,
                                                      nx/2 + 1, ny);
        error = cpl_imagelist_set(ilist4, image4, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
    }
    cpl_test_imagelist_abs(ilist3h, ilist4, 128.0 * nx *
                           (type == CPL_TYPE_DOUBLE ? DBL_EPSILON : FLT_EPSILON));
    /* Complex-to-real, both full size */
    error = cpl_fft_imagelist(ilist2, ilist3r, CPL_FFT_BACKWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* The back-transformed must match the original image */
    cpl_test_imagelist_abs(ilist1r, ilist2, 6.0 * (type == CPL_TYPE_DOUBLE ?
                                                   DBL_EPSILON : FLT_EPSILON));
    /* Complex-to-real, complex is half size */
    error = cpl_fft_imagelist(ilist2, ilist3h, CPL_FFT_BACKWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* The back-transformed must match the original image */
    cpl_test_imagelist_abs(ilist1r, ilist2, 6.0 * (type == CPL_TYPE_DOUBLE ?
                                                   DBL_EPSILON : FLT_EPSILON));
    /* Complex-to-complex of same real values */
    error = cpl_fft_imagelist(ilist3r, ilist3c, CPL_FFT_BACKWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    /* In-place complex-to-complex of same real values */
    error = cpl_fft_imagelist(ilist3c, ilist3c, CPL_FFT_BACKWARD | rigor);
    cpl_test_eq_error(error, CPL_ERROR_NONE);
    cpl_test_imagelist_abs(ilist3r, ilist3c, 8.0 * (type == CPL_TYPE_DOUBLE ?
                                                    DBL_EPSILON : FLT_EPSILON));
    /* The back-transformed must match the original image - on the real part */
    /* - and the back-transformed must have a zero-valued imaginary part */
    cpl_imagelist_empty(ilist4);
    for (i = 0; i < nz; i++) {
        const cpl_image * image3r = cpl_imagelist_get_const(ilist3r, i);
        cpl_image       * image4r = cpl_image_extract_real(image3r);
        cpl_image       * image4c = cpl_image_extract_imag(image3r);
        cpl_image       * image4  = cpl_image_new(nx, ny, type);
        error = cpl_imagelist_set(ilist4r, image4r, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_imagelist_set(ilist4c, image4c, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
        error = cpl_imagelist_set(ilist4, image4, i);
        cpl_test_eq_error(error, CPL_ERROR_NONE);
    }
    cpl_test_imagelist_abs(ilist1r, ilist4r, 6.0 * (type == CPL_TYPE_DOUBLE ?
                                                    DBL_EPSILON : FLT_EPSILON));
    cpl_test_imagelist_abs(ilist4c, ilist4, 2.0 * (type == CPL_TYPE_DOUBLE ?
                                                   DBL_EPSILON : FLT_EPSILON));
    cpl_imagelist_delete(ilist1r);
    cpl_imagelist_delete(ilist1c);
    cpl_imagelist_delete(ilist2);
    cpl_imagelist_delete(ilist3r);
    cpl_imagelist_delete(ilist3c);
    cpl_imagelist_delete(ilist3h);
    cpl_imagelist_delete(ilist4);
    cpl_imagelist_delete(ilist4r);
    cpl_imagelist_delete(ilist4c);
    cpl_imagelist_delete(ilist5);
}
/*----------------------------------------------------------------------------*/
/**
@internal
  @brief    Benchmark cpl_fft_imagelist() against cpl_fft_image()
@param nx Size in x (the number of columns)
@param ny Size in y (the number of rows)
@param nz Size in z (the number of planes/images)
@param type One of CPL_TYPE_DOUBLE or CPL_TYPE_FLOAT
@see cpl_fft_imagelist_test_one() cpl_fft_image_test_one()
**/
/*----------------------------------------------------------------------------*/
static void cpl_fft_imagelist_test_image(cpl_size nx, cpl_size ny, cpl_size nz,
                                         cpl_type type)
{
    cpl_flops list_flops, single_flops;
    double    list_time,  single_time;
    cpl_size  iplane;
    /* Time (and FLOP-count) the imagelist-based transforms */
    const cpl_flops list_flops0 = cpl_test_get_flops();
    const double    list_time0  = cpl_test_get_cputime();
    cpl_fft_imagelist_test_one(nx, ny, nz, type);
    list_flops = cpl_test_get_flops()   - list_flops0;
    list_time  = cpl_test_get_cputime() - list_time0;
    /* Same workload, transforming one image at a time */
    const cpl_flops single_flops0 = cpl_test_get_flops();
    const double    single_time0  = cpl_test_get_cputime();
    for (iplane = 0; iplane < nz; iplane++) {
        cpl_fft_image_test_one(nx, ny, type);
    }
    single_flops = cpl_test_get_flops()   - single_flops0;
    single_time  = cpl_test_get_cputime() - single_time0;
    /* Report rates when both timings are non-zero, raw counts otherwise */
    if (single_time > 0.0 && list_time > 0.0) {
        cpl_msg_info(cpl_func, "List vs single %d X %d X %d (%s): %g <=> %g "
                     "[s] (%g <=> %g [MFLOP/s])", (int)nx, (int)ny,
                     (int)nz, cpl_type_get_name(type), list_time, single_time,
                     1e-6*(double)list_flops/list_time,
                     1e-6*(double)single_flops/single_time);
    } else {
        cpl_msg_info(cpl_func, "List vs single %d X %d X %d (%s): %g <=> %g "
                     "[s] (%g <=> %g [MFLOP])", (int)nx, (int)ny,
                     (int)nz, cpl_type_get_name(type), list_time, single_time,
                     1e-6*(double)list_flops, 1e-6*(double)single_flops);
    }
}
/*----------------------------------------------------------------------------*/
/**
@internal
@brief Try to use the FFT for correlation
@param nx Size in x (the number of columns)
@param ny Size in y (the number of rows)
@param type One of CPL_TYPE_DOUBLE or CPL_TYPE_FLOAT
@see cpl_fft_image_test_one()
**/
/*----------------------------------------------------------------------------*/
static
void cpl_fft_image_test_correlate(cpl_size nx, cpl_size ny, cpl_type type)
{
    cpl_image * ia = cpl_image_new(nx, ny, type);
    cpl_image * ib = cpl_image_new(nx, ny, type);
    cpl_image * ic = cpl_image_new(nx, ny, type);
    cpl_image * fa = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX);
    cpl_image * fb = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX);
    cpl_image * fc = cpl_image_new(nx, ny, type | CPL_TYPE_COMPLEX);
    cpl_imagelist * iab = cpl_imagelist_new();
    cpl_imagelist * fab = cpl_imagelist_new();
    cpl_size xmax, ymax;
    cpl_error_code code;
    /* The lists take ownership of ia/ib and fa/fb; they are freed via
       the list deletes at the end */
    code = cpl_imagelist_set(iab, ia, 0);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_imagelist_set(iab, ib, 1);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_imagelist_set(fab, fa, 0);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_imagelist_set(fab, fb, 1);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    /* ia: a centered Gaussian; ib: the same Gaussian shifted by
       (nx/4, ny/4) */
    code = cpl_image_fill_gaussian(ia, nx/2.0, ny/2.0, 1.0, 1.0, 1.0);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_image_copy(ib, ia, 1, 1);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_image_shift(ib, nx/4, ny/4);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    /* Forward-transform both images in one call */
    code = cpl_fft_imagelist(fab, iab, CPL_FFT_FORWARD);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    /* Auto-correlate */
    /* conj(F(a)) * F(a), back-transformed: peak must sit at the origin */
    code = cpl_image_conjugate(fc, fa);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_image_multiply(fc, fa);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_fft_image(ic, fc, CPL_FFT_BACKWARD | CPL_FFT_NOSCALE);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_image_get_maxpos(ic, &xmax, &ymax);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    cpl_test_eq(xmax, 1);
    cpl_test_eq(ymax, 1);
    /* Cross-correlate */
    /* conj(F(b)) * F(a): the correlation peak reveals the (nx/4, ny/4)
       shift, wrapped into FFT coordinates */
    code = cpl_image_conjugate(fc, fb);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_image_multiply(fc, fa);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_fft_image(ic, fc, CPL_FFT_BACKWARD | CPL_FFT_NOSCALE);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    code = cpl_image_get_maxpos(ic, &xmax, &ymax);
    cpl_test_eq_error(code, CPL_ERROR_NONE);
    cpl_test_eq(xmax, 1 + nx/2 + nx/4);
    cpl_test_eq(ymax, 1 + ny/2 + ny/4);
    cpl_imagelist_delete(iab);
    cpl_imagelist_delete(fab);
    cpl_image_delete(ic);
    cpl_image_delete(fc);
}
#endif
|
ej1.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>
/* Reads a thread count from stdin, then spawns that many OpenMP threads,
 * each printing its id; finally reports the elapsed wall-clock time. */
int main() {
    double start = omp_get_wtime(); /* wall-clock start for the final report */
    /* PROGRAM */
    int threads;
    printf("\nIntroduce el numero de hilos a ejecutar: ");
    /* BUG FIX: fflush(stdin) is undefined behavior; the intent is to make
     * the prompt visible before blocking on input, i.e. flush stdout. */
    fflush(stdout);
    if (scanf("%i", &threads) != 1 || threads < 1) {
        threads = 1; /* fall back to one thread on invalid input */
    }
    /* Still outside the parallel region, so the team size is 1 */
    printf("\nEstamos ejecutando %i hilo\n", omp_get_num_threads());
    #pragma omp parallel num_threads(threads) /* fork: `threads` threads run this block */
    {
        #pragma omp single /* executed by exactly one thread of the team */
        printf("\nRegion en paralelo hay %i hilos", omp_get_num_threads());
        int id = omp_get_thread_num();
        printf("\nMi ID de hilo es %i", id);
    }
    /* END PROGRAM */
    printf("\n-------------------------------------------\nTiempo de ejecucion del programa %lfs\n-------------------------------------------\n", omp_get_wtime()-start);
    return 0;
}
|
hello-openmp.c | #include<stdio.h>
#include<omp.h>
/* Classic OpenMP hello-world: every thread greets, thread 0 also reports
 * the team size. */
int main()
{
    int nthreads;
    int tid;
    /* Fork a team of threads; each thread gets a private copy of tid. */
    #pragma omp parallel private(tid)
    {
        tid = omp_get_thread_num();
        printf("Hello World from thread = %d \n", tid);
        /* Only thread 0 (the master) queries and prints the team size. */
        if (tid == 0) {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    } /* implicit barrier: the team joins here */
}
|
BIDMach_CPUMACH.c | #include <jni.h>
#include <omp.h>
#include <math.h>
#include <stdlib.h>
#ifdef __GNUC__
#define __forceinline __attribute__((always_inline)) inline
#endif
/* Positive-sample word2vec update over a sentence of ncols word ids W,
 * with per-position context windows [LB[i], UB[i]]. A and B hold the two
 * embedding matrices (nrows floats per word); lrate/vexp control the
 * frequency-scaled learning rate. Work is split over nthreads OpenMP
 * threads, each owning a contiguous column range.
 * Cleanup: removed unused locals `k` and `coff`. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_word2vecPos
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint skip, jintArray jW, jintArray jLB, jintArray jUB,
 jfloatArray jA, jfloatArray jB, jfloat lrate, jfloat vexp, jint nthreads)
{
  int ithread;
  /* Pin the Java arrays for direct access; released in reverse order below. */
  int * W = (jint *)((*env)->GetPrimitiveArrayCritical(env, jW, JNI_FALSE));
  int * LB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jLB, JNI_FALSE));
  int * UB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jUB, JNI_FALSE));
  float * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
  float * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
#pragma omp parallel for
  for (ithread = 0; ithread < nthreads; ithread++) {
    /* Contiguous column slab for this thread (64-bit math avoids overflow). */
    int istart = (1L * ithread * ncols)/nthreads;
    int iend = (1L * (ithread+1) * ncols)/nthreads;
    int i, j, c, ia, ib, itmp;
    float cv, ascale, bscale;
    /* Per-thread accumulator for the update to A's current column.
       NOTE(review): malloc result is unchecked; allocation failure would
       dereference NULL below — confirm whether a guard is wanted. */
    float * daa = (float *)malloc(nrows*sizeof(float));
    for (i = istart; i < iend; i++) {
      itmp = W[i];
      ia = nrows * itmp;
      if (ia >= 0) {               /* negative ids mark skipped words */
        ascale = pow(1+itmp, vexp);
        for (c = 0; c < nrows; c++) {
          daa[c] = 0;
        }
        for (j = LB[i]; j <= UB[i]; j++) {
          /* j == 0 is the center word itself; clip the window to bounds */
          if (j != 0 && i + j >= 0 && i + j < ncols) {
            itmp = W[i + j];
            ib = nrows * itmp;
            if (ib >= 0) {
              bscale = pow(1+itmp, vexp);
              /* Dot product of the two embeddings */
              cv = 0;
              for (c = 0; c < nrows; c++) {
                cv += A[c + ia] * B[c + ib];
              }
              /* Clamped logistic: saturate outside [-16, 16] */
              if (cv > 16.0f) {
                cv = 1.0f;
              } else if (cv < -16.0f) {
                cv = 0.0f;
              } else {
                cv = exp(cv);
                cv = cv / (1.0f + cv);
              }
              cv = lrate * (1.0f - cv);
              /* Accumulate A's gradient; update B in place */
              for (c = 0; c < nrows; c++) {
                daa[c] += ascale * cv * B[c + ib];
                B[c + ib] += bscale * cv * A[c + ia];
              }
            }
          }
        }
        /* Apply the accumulated update to the center word's embedding */
        for (c = 0; c < nrows; c++) {
          A[c + ia] += daa[c];
        }
      }
    }
    free(daa);
  }
  (*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jUB, UB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jLB, LB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jW, W, 0);
}
/* Maps a global column index onto a sliced model layout.
 * Outputs: *mm = index of the owning matrix (offset applied),
 * *ii = element offset of that column within the matrix,
 * *ismine = 1 iff this slice owns the (non-head) column,
 * *ishead = 1 iff the column lies in the shared head region. */
void mapIndx(int *mm, int *ii, int *ismine, int *ishead, int indx, int islice, int nslices, int nhead, int maxcols, int nrows, int offset)
{
  const int tail = (indx >= nhead);        /* past the shared head? */
  int packed = indx;
  if (tail) packed = (indx - nhead) / nslices + nhead;  /* compacted column */
  *mm = packed / maxcols + offset;                      /* owning matrix */
  *ismine = tail && (indx % nslices == islice);
  *ishead = !tail;
  *ii = nrows * (packed - (*mm) * maxcols);
}
/* JNI entry: word2vec positive-sample (skip-gram) SGD update on a sliced model.
 * Model columns are distributed over nslices workers via mapIndx; jMM holds
 * matrix pairs where even entries supply the B (context) side and odd entries
 * the A side. For each center word i and in-window context i+j, applies a
 * logistic-loss gradient step with per-frequency scaling (1+index)^vexp.
 * doHead controls whether replicated head columns are updated.
 * FIX: `float *A, *B` were declared at function scope, so under
 * `omp parallel for` all threads shared (and raced on) them; they are now
 * private to each thread. Unused locals (k, coff) removed.
 * NOTE(review): `skip` is unused here - presumably kept for signature parity;
 * concurrent updates to shared columns remain unsynchronized (hogwild-style). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_word2vecPosSlice
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint skip, jintArray jW, jintArray jLB, jintArray jUB,
jobjectArray jMM, jfloat lrate, jfloat vexp, jint nthreads,
jint islice, jint nslices, jint maxCols, jint nHead, jint dualMode, jint doHead)
{
  int ix, ithread;
  int offset = 0;
  int * W = (jint *)((*env)->GetPrimitiveArrayCritical(env, jW, JNI_FALSE));
  int * LB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jLB, JNI_FALSE));
  int * UB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jUB, JNI_FALSE));
  int nelems = (*env)->GetArrayLength(env, jMM);
  jfloatArray *X = malloc(nelems * sizeof(jfloatArray));
  float **Y = malloc(nelems * sizeof(float *));
  if (dualMode) offset = 1;
  for (ix = 0; ix < nelems; ix++) {
    X[ix] = (jfloatArray)((*env)->GetObjectArrayElement(env, jMM, ix));
    Y[ix] = (float *)((*env)->GetPrimitiveArrayCritical(env, X[ix], JNI_FALSE));
  }
#pragma omp parallel for
  for (ithread = 0; ithread < nthreads; ithread++) {
    int istart = (1L * ithread * ncols)/nthreads;
    int iend = (1L * (ithread+1) * ncols)/nthreads;
    int i, j, c, ia, ib, iac, ibc;
    int aismine, bismine, aishead, bishead, ma, mb;
    float cv, ascale, bscale;
    float *A, *B;                      /* thread-private matrix pointers (was a shared-variable race) */
    int touched;
    float * daa = (float *)malloc(nrows*sizeof(float));   /* per-center-word accumulator for A's column */
    for (i = istart; i < iend; i++) {
      iac = W[i];
      if (iac >= 0) {                  /* negative entries are skip sentinels */
        mapIndx(&ma, &ia, &aismine, &aishead, iac, islice, nslices, nHead, maxCols, nrows, offset);
        A = Y[2*ma+1];
        ascale = pow(1+iac, vexp);
        for (c = 0; c < nrows; c++) {
          daa[c] = 0;
        }
        touched = 0;
        for (j = LB[i]; j <= UB[i]; j++) {   /* window bounds, j = 0 is the center itself */
          if (j != 0 && i + j >= 0 && i + j < ncols) {
            ibc = W[i + j];
            if (ibc >= 0) {
              mapIndx(&mb, &ib, &bismine, &bishead, ibc, islice, nslices, nHead, maxCols, nrows, offset);
              B = Y[2*mb];
              bscale = pow(1+ibc, vexp);
              /* only process pairs this worker is responsible for */
              if ((doHead > 1 && aishead && bishead) || (aismine && bishead) || (bismine && aishead) || (aismine && bismine)) {
                touched = 1;
                cv = 0;
                for (c = 0; c < nrows; c++) {
                  cv += A[c + ia] * B[c + ib];
                }
                /* clamped logistic: sigmoid(cv) */
                if (cv > 16.0f) {
                  cv = 1.0f;
                } else if (cv < -16.0f) {
                  cv = 0.0f;
                } else {
                  cv = exp(cv);
                  cv = cv / (1.0f + cv);
                }
                cv = lrate * (1.0f - cv);    /* positive-sample gradient coefficient */
                for (c = 0; c < nrows; c++) {
                  daa[c] += ascale * cv * B[c + ib];
                }
                if (bismine || (bishead && doHead)) {
                  for (c = 0; c < nrows; c++) {
                    B[c + ib] += bscale * cv * A[c + ia];
                  }
                }
              }
            }
          }
        }
        if (touched && (aismine || (aishead && doHead))) {
          for (c = 0; c < nrows; c++) {
            A[c + ia] += daa[c];
          }
        }
      }
    }
    free(daa);
  }
  for (ix = nelems-1; ix >= 0; ix--) {       /* release in reverse acquisition order */
    (*env)->ReleasePrimitiveArrayCritical(env, X[ix], Y[ix], 0);
  }
  (*env)->ReleasePrimitiveArrayCritical(env, jUB, UB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jLB, LB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jW, W, 0);
  free(Y);
  free(X);
}
/* JNI entry: word2vec negative-sampling SGD update.
 * For each sample column i, pairs every "B-side" word WB[k,i] (nwb of them)
 * with every "A-side" word WA[j,i] (nwa of them) and pushes sigmoid(dot) toward
 * 0 (negative-sample gradient -lrate*sigmoid). Updates are scaled per word by
 * (1+index)^vexp. A deltas are buffered in daa (one block per j) and B deltas
 * in dbb, then applied after the inner loops so each column is written once.
 * Work is split contiguously over nthreads OpenMP threads.
 * NOTE(review): distinct sample columns can reference the same word columns of
 * A/B; those updates are unsynchronized (presumably accepted hogwild-style). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_word2vecNeg
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint nwa, jint nwb, jintArray jWA, jintArray jWB,
jfloatArray jA, jfloatArray jB, jfloat lrate, jfloat vexp, jint nthreads)
{
int ithread;
int * WA = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWA, JNI_FALSE));
int * WB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWB, JNI_FALSE));
float * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
float * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
#pragma omp parallel for
for (ithread = 0; ithread < nthreads; ithread++) {
/* all loop state is thread-local */
int i, j, k, c, ia, ib, ja, itmp;
float cv, ascale, bscale;
int istart = (1L * ithread * ncols)/nthreads;
int iend = (1L * (ithread+1) * ncols)/nthreads;
float * daa = (float *)malloc(nwa*nrows*sizeof(float));  /* accumulated A deltas, nwa column blocks */
float * dbb = (float *)malloc(nrows*sizeof(float));      /* accumulated delta for one B column */
for (i = istart; i < iend; i++) {
/* clear the A-delta buffer for this sample column */
for (j = 0; j < nwa; j++) {
ja = j * nrows;
for (c = 0; c < nrows; c++) {
daa[c + ja] = 0;
}
}
for (k = 0; k < nwb; k++) {
itmp = WB[k+i*nwb];
ib = nrows * itmp;                 /* element offset of B's column */
bscale = pow(1+itmp, vexp);        /* frequency-dependent learning-rate scale */
for (c = 0; c < nrows; c++) {
dbb[c] = 0;
}
for (j = 0; j < nwa; j++) {
itmp = WA[j+i*nwa];
ia = nrows * itmp;
ascale = pow(1+itmp, vexp);
cv = 0;
for (c = 0; c < nrows; c++) {
cv += A[c + ia] * B[c + ib];
}
/* clamped sigmoid of the dot product */
if (cv > 16.0f) {
cv = 1.0f;
} else if (cv < -16.0f) {
cv = 0.0f;
} else {
cv = exp(cv);
cv = cv / (1.0f + cv);
}
cv = - lrate * cv;                 /* negative-sample gradient coefficient */
ja = j * nrows;
for (c = 0; c < nrows; c++) {
dbb[c] += bscale * cv * A[c + ia];
daa[c + ja] += ascale * cv * B[c + ib];
}
}
for (c = 0; c < nrows; c++) {
B[c + ib] += dbb[c];
}
}
/* flush the buffered A deltas */
for (j = 0; j < nwa; j++) {
ja = j * nrows;
ia = nrows * WA[j+i*nwa];
for (c = 0; c < nrows; c++) {
A[c + ia] += daa[c + ja];
}
}
}
free(dbb);
free(daa);
}
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jWB, WB, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jWA, WA, 0);
}
/* JNI entry: word2vec negative-sampling SGD update on a sliced model.
 * Like word2vecNeg, but model columns are distributed over nslices workers via
 * mapIndx; jMM holds matrix pairs (even entries feed B, odd entries feed A),
 * and ownership flags gate which columns this worker may update (doHead
 * controls updates to the replicated head columns).
 * FIX: `float *A, *B` were declared at function scope, so under
 * `omp parallel for` all threads shared (and raced on) them; they are now
 * private to each thread. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_word2vecNegSlice
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint nwa, jint nwb, jintArray jWA, jintArray jWB,
jfloatArray jMM, jfloat lrate, jfloat vexp, jint nthreads,
jint islice, jint nslices, jint maxCols, jint nHead, jint dualMode, jint doHead)
{
  int ix, ithread, offset = 0;
  int * WA = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWA, JNI_FALSE));
  int * WB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWB, JNI_FALSE));
  int nelems = (*env)->GetArrayLength(env, jMM);
  jfloatArray *X = malloc(nelems * sizeof(jfloatArray));
  float **Y = malloc(nelems * sizeof(float *));
  if (dualMode) offset = 1;
  for (ix = 0; ix < nelems; ix++) {
    X[ix] = (jfloatArray)((*env)->GetObjectArrayElement(env, jMM, ix));
    Y[ix] = (float *)((*env)->GetPrimitiveArrayCritical(env, X[ix], JNI_FALSE));
  }
#pragma omp parallel for
  for (ithread = 0; ithread < nthreads; ithread++) {
    int i, j, k, c, ia, ib, iac, ibc, ja;
    float cv, ascale, bscale;
    float *A, *B;                      /* thread-private matrix pointers (was a shared-variable race) */
    int aismine, bismine, aishead, bishead, ma, mb;
    int istart = (1L * ithread * ncols)/nthreads;
    int iend = (1L * (ithread+1) * ncols)/nthreads;
    float * daa = (float *)malloc(nwa*nrows*sizeof(float));  /* buffered A deltas */
    float * dbb = (float *)malloc(nrows*sizeof(float));      /* buffered delta for one B column */
    for (i = istart; i < iend; i++) {
      for (j = 0; j < nwa; j++) {
        ja = j * nrows;
        for (c = 0; c < nrows; c++) {
          daa[c + ja] = 0;
        }
      }
      for (k = 0; k < nwb; k++) {
        ibc = WB[k+i*nwb];
        mapIndx(&mb, &ib, &bismine, &bishead, ibc, islice, nslices, nHead, maxCols, nrows, offset);
        B = Y[2*mb];
        bscale = pow(1+ibc, vexp);
        for (c = 0; c < nrows; c++) {
          dbb[c] = 0;
        }
        for (j = 0; j < nwa; j++) {
          iac = WA[j+i*nwa];
          mapIndx(&ma, &ia, &aismine, &aishead, iac, islice, nslices, nHead, maxCols, nrows, offset);
          A = Y[2*ma+1];
          ascale = pow(1+iac, vexp);
          /* only process pairs this worker is responsible for */
          if ((doHead > 1 && aishead && bishead) || (aismine && bishead) || (bismine && aishead) || (aismine && bismine)) {
            cv = 0;
            for (c = 0; c < nrows; c++) {
              cv += A[c + ia] * B[c + ib];
            }
            /* clamped sigmoid */
            if (cv > 16.0f) {
              cv = 1.0f;
            } else if (cv < -16.0f) {
              cv = 0.0f;
            } else {
              cv = exp(cv);
              cv = cv / (1.0f + cv);
            }
            cv = - lrate * cv;         /* negative-sample gradient coefficient */
            ja = j * nrows;
            for (c = 0; c < nrows; c++) {
              dbb[c] += bscale * cv * A[c + ia];
              daa[c + ja] += ascale * cv * B[c + ib];
            }
          }
        }
        if (bismine || (bishead && doHead)) {
          for (c = 0; c < nrows; c++) {
            B[c + ib] += dbb[c];
          }
        }
      }
      for (j = 0; j < nwa; j++) {
        ja = j * nrows;
        iac = WA[j+i*nwa];
        mapIndx(&ma, &ia, &aismine, &aishead, iac, islice, nslices, nHead, maxCols, nrows, offset);
        A = Y[2*ma+1];
        if (aismine || (aishead && doHead)) {
          for (c = 0; c < nrows; c++) {
            A[c + ia] += daa[c + ja];
          }
        }
      }
    }
    free(dbb);
    free(daa);
  }
  for (ix = nelems-1; ix >= 0; ix--) {       /* release in reverse acquisition order */
    (*env)->ReleasePrimitiveArrayCritical(env, X[ix], Y[ix], 0);
  }
  (*env)->ReleasePrimitiveArrayCritical(env, jWB, WB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jWA, WA, 0);
  free(Y);
  free(X);
}
/* JNI entry: evaluate the positive-sample log-likelihood of a word2vec model.
 * For every center word i and in-window context i+j (LB[i] <= j <= UB[i],
 * j != 0), accumulates log(sigmoid(dot(A-col, B-col))), clamped away from
 * log(0). Negative W entries (sentinels) are skipped via the ia/ib >= 0 tests.
 * Per-thread partial sums are reduced serially after the parallel region.
 * Returns the total log-likelihood.
 * FIX: `sum` was read uninitialized (undefined behavior - the returned total
 * was garbage); it is now zero-initialized. Unused locals (k, coff) removed.
 * NOTE(review): `skip` is unused - presumably kept for signature parity. */
JNIEXPORT jdouble JNICALL Java_edu_berkeley_bid_CPUMACH_word2vecEvalPos
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint skip, jintArray jW, jintArray jLB, jintArray jUB,
jfloatArray jA, jfloatArray jB, jint nthreads)
{
  int i, ithread;
  int * W = (jint *)((*env)->GetPrimitiveArrayCritical(env, jW, JNI_FALSE));
  int * LB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jLB, JNI_FALSE));
  int * UB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jUB, JNI_FALSE));
  float * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
  float * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
  double * pv = (double *)malloc(nthreads * sizeof(double));
  double sum = 0;                      /* FIX: was uninitialized */
#pragma omp parallel for
  for (ithread = 0; ithread < nthreads; ithread++) {
    int istart = (1L * ithread * ncols)/nthreads;
    int iend = (1L * (ithread+1) * ncols)/nthreads;
    int i, j, c, ia, ib;
    float cv;
    double dv = 0;
    for (i = istart; i < iend; i++) {
      ia = nrows * W[i];
      if (ia >= 0) {
        for (j = LB[i]; j <= UB[i]; j++) {
          if (j != 0 && i + j >= 0 && i + j < ncols) {
            ib = nrows * W[i + j];
            if (ib >= 0) {
              cv = 0;
              for (c = 0; c < nrows; c++) {
                cv += A[c + ia] * B[c + ib];
              }
              /* clamped sigmoid of the dot product */
              if (cv > 16.0f) {
                cv = 1.0f;
              } else if (cv < -16.0f) {
                cv = 0.0f;
              } else {
                cv = exp(cv);
                cv = cv / (1.0f + cv);
              }
              dv += log(fmax((double)cv, 1.0e-40));  /* guard against log(0) */
            }
          }
        }
      }
    }
    pv[ithread] = dv;
  }
  (*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jUB, UB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jLB, LB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jW, W, 0);
  for (i = 0; i < nthreads; i++) {
    sum += pv[i];
  }
  free(pv);
  return sum;
}
/* JNI entry: evaluate the negative-sample log-likelihood of a word2vec model.
 * For each sample column i, pairs every WB[k,i] with every WA[j,i] and
 * accumulates log(1 - sigmoid(dot)), clamped away from log(0). Per-thread
 * partial sums are reduced serially after the parallel region.
 * Returns the total log-likelihood.
 * FIX: `sum` was read uninitialized (undefined behavior - the returned total
 * was garbage); it is now zero-initialized. Unused local `ja` removed. */
JNIEXPORT jdouble JNICALL Java_edu_berkeley_bid_CPUMACH_word2vecEvalNeg
(JNIEnv *env, jobject obj, jint nrows, jint ncols, const jint nwa, const jint nwb, jintArray jWA, jintArray jWB,
jfloatArray jA, jfloatArray jB, jint nthreads)
{
  int i, ithread;
  int * WA = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWA, JNI_FALSE));
  int * WB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWB, JNI_FALSE));
  float * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
  float * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
  double * pv = (double *)malloc(nthreads * sizeof(double));
  double sum = 0;                      /* FIX: was uninitialized */
#pragma omp parallel for
  for (ithread = 0; ithread < nthreads; ithread++) {
    int i, j, k, c, ia, ib;
    float cv;
    double dv = 0;
    int istart = (1L * ithread * ncols)/nthreads;
    int iend = (1L * (ithread+1) * ncols)/nthreads;
    for (i = istart; i < iend; i++) {
      for (k = 0; k < nwb; k++) {
        ib = nrows * WB[k+i*nwb];
        for (j = 0; j < nwa; j++) {
          ia = nrows * WA[j+i*nwa];
          cv = 0;
          for (c = 0; c < nrows; c++) {
            cv += A[c + ia] * B[c + ib];
          }
          /* clamped sigmoid of the dot product */
          if (cv > 16.0f) {
            cv = 1.0f;
          } else if (cv < -16.0f) {
            cv = 0.0f;
          } else {
            cv = exp(cv);
            cv = cv / (1.0f + cv);
          }
          dv += log(fmax(1.0 - (double)cv, 1.0e-40));  /* guard against log(0) */
        }
      }
    }
    pv[ithread] = dv;
  }
  (*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jWB, WB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jWA, WA, 0);
  for (i = 0; i < nthreads; i++) {
    sum += pv[i];
  }
  free(pv);
  return sum;
}
/* JNI entry: word2vec forward products.
 * For each sample column i, computes all nwa x nwb pairwise dot products
 * between the A columns selected by WA[:,i] and the B columns selected by
 * WB[:,i], storing them as C[j + nwa*(k + nwb*i)] = dot(A-col j, B-col k).
 * Sample columns are processed in parallel; each writes disjoint parts of C. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_word2vecFwd
(JNIEnv *env, jobject obj, jint nrows, jint ncols, const jint nwa, const jint nwb, jintArray jWA, jintArray jWB,
jfloatArray jA, jfloatArray jB, jfloatArray jC)
{
  int i;
  jint * WA = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWA, JNI_FALSE));
  jint * WB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWB, JNI_FALSE));
  jfloat * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
  jfloat * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
  jfloat * C = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE));
#pragma omp parallel for
  for (i = 0; i < ncols; i++) {
    int ja, kb, r;
    for (ja = 0; ja < nwa; ja++) {
      int acol = nrows * WA[ja + i*nwa];       /* offset of the selected A column */
      for (kb = 0; kb < nwb; kb++) {
        int bcol = nrows * WB[kb + i*nwb];     /* offset of the selected B column */
        float dot = 0;
        for (r = 0; r < nrows; r++) {
          dot += A[r + acol] * B[r + bcol];
        }
        C[ja + nwa * (kb + nwb * i)] = dot;
      }
    }
  }
  (*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jWB, WB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jWA, WA, 0);
}
/* JNI entry: backward pass for word2vecFwd's pairwise products.
 * For each sample column i, overwrites:
 *   A[:,WA[j,i]] = lrate * sum_k C[j,k,i] * B[:,WB[k,i]]
 *   B[:,WB[k,i]] = lrate * sum_j C[j,k,i] * A[:,WA[j,i]]
 * (each target column is zeroed, then accumulated; note the B update reads the
 * A columns just rewritten above, which is the existing observable behavior).
 * FIX: helper variables j, k, c, cv, ia, ib were declared at function scope
 * and therefore SHARED between threads under `omp parallel for` - a data race
 * that corrupted indices and results with nthreads > 1. They are now declared
 * inside the parallel loop so each iteration gets private copies.
 * NOTE(review): distinct sample columns sharing a WA/WB index still race on
 * the shared A/B columns - presumably accepted hogwild-style; confirm. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_word2vecBwd
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint nwa, jint nwb, jintArray jWA, jintArray jWB,
jfloatArray jA, jfloatArray jB, jfloatArray jC, jfloat lrate)
{
  int i;
  jint * WA = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWA, JNI_FALSE));
  jint * WB = (jint *)((*env)->GetPrimitiveArrayCritical(env, jWB, JNI_FALSE));
  jfloat * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
  jfloat * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
  jfloat * C = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE));
#pragma omp parallel for
  for (i = 0; i < ncols; i++) {
    int j, k, c, ia, ib;               /* per-iteration (private) state */
    float cv;
    for (j = 0; j < nwa; j++) {
      ia = nrows*WA[j+i*nwa];
      for (c = 0; c < nrows; c++) {
        A[c + ia] = 0;
      }
      for (k = 0; k < nwb; k++) {
        ib = nrows*WB[k+i*nwb];
        cv = lrate * C[j + nwa * (k + nwb * i)];
        for (c = 0; c < nrows; c++) {
          A[c + ia] += cv * B[c + ib];
        }
      }
    }
    for (k = 0; k < nwb; k++) {
      ib = nrows*WB[k+i*nwb];
      for (c = 0; c < nrows; c++) {
        B[c + ib] = 0;
      }
      for (j = 0; j < nwa; j++) {
        ia = nrows*WA[j+i*nwa];
        cv = lrate * C[j + nwa * (k + nwb * i)];
        for (c = 0; c < nrows; c++) {
          B[c + ib] += cv * A[c + ia];
        }
      }
    }
  }
  (*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jWB, WB, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jWA, WA, 0);
}
/* JNI entry: debug helper - pins every float array in `arr` and prints the
 * first element of the first two arrays.
 * FIX: the printf unconditionally read Y[0][0] and Y[1][0]; with fewer than
 * two (or empty) arrays that was an out-of-bounds read. The print is now
 * guarded by the actual element count. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_testarrays
(JNIEnv *env, jobject obj, jobjectArray arr)
{
  int i;
  int nelems = (*env)->GetArrayLength(env, arr);
  jfloatArray *X = malloc(nelems * sizeof(jfloatArray));
  jfloat **Y = malloc(nelems * sizeof(jfloat *));
  for (i = 0; i < nelems; i++) {
    X[i] = (jfloatArray)((*env)->GetObjectArrayElement(env, arr, i));
    Y[i] = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, X[i], JNI_FALSE));
  }
  if (nelems >= 2) {
    printf("n=%d, v=%f, u=%f\n", nelems, Y[0][0], Y[1][0]);
  } else if (nelems == 1) {
    printf("n=%d, v=%f\n", nelems, Y[0][0]);
  } else {
    printf("n=%d\n", nelems);
  }
  fflush(stdout);
  for (i = 0; i < nelems; i++) {
    (*env)->ReleasePrimitiveArrayCritical(env, X[i], Y[i], 0);
  }
  free(X);
  free(Y);
}
/* Apply an elementwise function to A[istart..iend), writing B.
 * `fn` is an expression in the local variable x = A[i]; the names i, istart,
 * iend, A and B must be in scope at the expansion site. */
#define APPLYCFN(fn) \
for (i = istart; i < iend; i++) { \
float x = A[i]; \
B[i] = (fn); \
}
/* Elementwise two-operand form: x = A[i] (forward value), y = B[i] (incoming
 * gradient), result written to C[i]. Same in-scope name requirements. */
#define APPLYCOP(fn) \
for (i = istart; i < iend; i++) { \
float x = A[i]; \
float y = B[i]; \
C[i] = (fn); \
}
/* Activation-function selectors (the ifn argument of applyfwd/applyderiv). */
#define SIGMOIDN 0
#define TANHN 1
#define SOFTPLUSN 2
/* Forward expressions (clamped to avoid exp overflow/underflow). */
#define SIGMOIDX (x > 20.0f) ? 1.0f : ((x < -80.0f) ? 0.0f : 1.0f/(1.0f + exp(-x)))
#define SOFTPLUSX (x > 20.0f) ? x : ((x < -20.0f) ? 0.0f : log(1.0f + exp(x)))
/* Derivative-times-gradient expressions; for SIGMOIDY/TANHY, x is the forward
 * OUTPUT (sigmoid'(z) = s - s^2, tanh'(z) = 1 - t^2). */
#define SIGMOIDY (y * (x - x * x))
#define TANHY (y * (1.0f - x * x))
#define SOFTPLUSY y * ((x > 20.0f) ? 1.0f : ((x < -80.0f) ? 0.0f : 1.0f/(1.0f + exp(-x))))
/* JNI entry: elementwise forward activation B = f(A) over n elements,
 * partitioned contiguously across nthreads OpenMP threads.
 * ifn selects the function (SIGMOIDN or SOFTPLUSN).
 * NOTE(review): there is no TANHN case here, so ifn == TANHN leaves B
 * untouched - presumably tanh forward is computed elsewhere; confirm. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_applyfwd
(JNIEnv *env, jobject obj, jfloatArray jA, jfloatArray jB, jint ifn, jint n, jint nthreads)
{
int ithread;
float * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
float * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
#pragma omp parallel for
for (ithread = 0; ithread < nthreads; ithread++) {
/* contiguous slice [istart, iend) for this thread */
int istart = (1L * ithread * n)/nthreads;
int iend = (1L * (ithread+1) * n)/nthreads;
int i;
/* APPLYCFN expands a loop over i using A, B, istart, iend from this scope */
switch (ifn) {
case SIGMOIDN: APPLYCFN(SIGMOIDX); break;
case SOFTPLUSN: APPLYCFN(SOFTPLUSX); break;
}
}
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* JNI entry: elementwise backward pass C = f'(·) * B over n elements,
 * partitioned contiguously across nthreads OpenMP threads.
 * A supplies x and B supplies the incoming gradient y; ifn selects sigmoid,
 * tanh or softplus. For sigmoid/tanh the macros treat x as the forward OUTPUT;
 * for softplus x is the pre-activation input (see the *Y macros above). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_applyderiv
(JNIEnv *env, jobject obj, jfloatArray jA, jfloatArray jB, jfloatArray jC, jint ifn, jint n, jint nthreads)
{
int ithread;
float * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
float * B = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE));
float * C = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE));
#pragma omp parallel for
for (ithread = 0; ithread < nthreads; ithread++) {
/* contiguous slice [istart, iend) for this thread */
int istart = (1L * ithread * n)/nthreads;
int iend = (1L * (ithread+1) * n)/nthreads;
int i;
/* APPLYCOP expands a loop over i using A, B, C, istart, iend from this scope */
switch (ifn) {
case SIGMOIDN: APPLYCOP(SIGMOIDY); break;
case TANHN: APPLYCOP(TANHY); break;
case SOFTPLUSN: APPLYCOP(SOFTPLUSY); break;
}
}
(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* JNI entry: sparse-dense multiply fused with an ADAGrad-style update.
 * B is CSC (Bdata/Bir/Bjc, with ioff = Bjc[0] handling 1-based storage).
 * For each nonzero B(ival, i) and each row k, the raw gradient is
 * grad = A[k, i] * Bdata; Sumsq accumulates grad^2 + epsilon, and when
 * addgrad is set the model update is
 *   MM[k, ival] += grad * lrate * istep^texp / (Sumsq * istep)^vexp.
 * lrate/vexp/texp may be scalars (length 1) or per-row vectors; Mask (optional)
 * zeroes entries per-element (maskrows > 1) or per-column. The slow path
 * handles vector hyperparameters and masking; the fast path specializes the
 * common scalar ve == te == 0.5 case to a single sqrt. When biasv > 0, column
 * nbr is additionally updated as a bias (bval implicitly 1) for every column i.
 * NOTE(review): columns are processed in parallel and distinct columns can hit
 * the same output row ival, so Sumsq/MM updates are unsynchronized -
 * presumably accepted hogwild-style; confirm intended. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_multADAGrad
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint nnz, jfloatArray jA, jfloatArray jBdata, jintArray jBir, jintArray jBjc,
jfloatArray jMM, jfloatArray jSumsq, jfloatArray jMask, int maskrows, jfloatArray jlrate, jint lrlen,
jfloatArray jvexp, jint vexplen, jfloatArray jtexp, jint texplen, jfloat istep, jint addgrad, jfloat epsilon, jint biasv, jint nbr)
{
float * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
float * Bdata = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jBdata, JNI_FALSE));
int * Bir = (jint *)((*env)->GetPrimitiveArrayCritical(env, jBir, JNI_FALSE));
int * Bjc = (jint *)((*env)->GetPrimitiveArrayCritical(env, jBjc, JNI_FALSE));
float * MM = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jMM, JNI_FALSE));
float * Sumsq = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jSumsq, JNI_FALSE));
float * lrate = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jlrate, JNI_FALSE));
float * vexp = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jvexp, JNI_FALSE));
float * texp = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jtexp, JNI_FALSE));
float * Mask = NULL;
int i;
int ioff = Bjc[0];          /* column-pointer base (handles 1-based CSC) */
if (jMask != NULL) Mask = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jMask, JNI_FALSE));
#pragma omp parallel for
for (i = 0; i < ncols; i++) {
int jstart = Bjc[i] - ioff;
int jend = Bjc[i+1] - ioff;
int j;
if (Mask != NULL || lrlen > 1 || vexplen > 1 || texplen > 1) {
/* general path: per-row hyperparameters and/or masking */
for (j = jstart; j < jend ; j++) {
float bval = Bdata[j];
int ival = Bir[j] - ioff;
int k;
for (k = 0; k < nrows; k++) {
float lr, ve, te, pve, ste, ngrad;
float grad = A[k+i*nrows]*bval;
int ihere = k+ival*nrows;
Sumsq[ihere] += grad*grad + epsilon;
if (addgrad) {
lr = (lrlen > 1) ? lrate[k] : lrate[0];
ve = (vexplen > 1) ? vexp[k] : vexp[0];
te = (texplen > 1) ? texp[k] : texp[0];
pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
ste = pow(istep, te);
ngrad = grad * lr * ste / pve;
MM[ihere] += ngrad;
}
if (Mask != NULL) {
if (maskrows > 1) {
if (Mask[ihere] == 0) MM[ihere] = 0;   /* per-element mask */
} else {
if (Mask[ival] == 0) MM[ihere] = 0;    /* per-column mask */
}
}
}
}
if (biasv > 0) {
/* bias column nbr: same update with implicit bval = 1 */
int ival = nbr;
int k;
for (k = 0; k < nrows; k++) {
float lr, ve, te, pve, ste, ngrad;
float grad = A[k+i*nrows];
int ihere = k+ival*nrows;
Sumsq[ihere] += grad*grad + epsilon;
if (addgrad) {
lr = (lrlen > 1) ? lrate[k] : lrate[0];
ve = (vexplen > 1) ? vexp[k] : vexp[0];
te = (texplen > 1) ? texp[k] : texp[0];
pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
ste = pow(istep, te);
ngrad = grad * lr * ste / pve;
MM[ihere] += ngrad;
}
if (Mask != NULL) {
if (maskrows > 1) {
if (Mask[ihere] == 0) MM[ihere] = 0;
} else {
if (Mask[ival] == 0) MM[ihere] = 0;
}
}
}
}
} else {
/* fast path: scalar hyperparameters, no mask */
float lr, ve, te, pve, ste, ngrad;
lr = lrate[0];
ve = vexp[0];
te = texp[0];
for (j = jstart; j < jend ; j++) {
float bval = Bdata[j];
int ival = Bir[j] - ioff;
int k;
if (addgrad && ve == 0.5f && te == 0.5f) {
/* ve = te = 0.5 specialization: the istep factors cancel to 1/sqrt(Sumsq) */
for (k = 0; k < nrows; k++) {
float grad = A[k+i*nrows]*bval;
int ihere = k+ival*nrows;
Sumsq[ihere] += grad*grad + epsilon;
pve = sqrt(Sumsq[ihere]);
ngrad = grad * lr / pve;
MM[ihere] += ngrad;
}
} else {
for (k = 0; k < nrows; k++) {
float grad = A[k+i*nrows]*bval;
int ihere = k+ival*nrows;
Sumsq[ihere] += grad*grad + epsilon;
if (addgrad) {
pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
ste = pow(istep, te);
ngrad = grad * lr * ste / pve;
MM[ihere] += ngrad;
}
}
}
}
if (biasv > 0) {
int ival = nbr;
int k;
if (addgrad && ve == 0.5f && te == 0.5f) {
for (k = 0; k < nrows; k++) {
float grad = A[k+i*nrows];
int ihere = k+ival*nrows;
Sumsq[ihere] += grad*grad + epsilon;
pve = sqrt(Sumsq[ihere]);
ngrad = grad * lr / pve;
MM[ihere] += ngrad;
}
} else {
for (k = 0; k < nrows; k++) {
float grad = A[k+i*nrows];
int ihere = k+ival*nrows;
Sumsq[ihere] += grad*grad + epsilon;
if (addgrad) {
pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
ste = pow(istep, te);
ngrad = grad * lr * ste / pve;
MM[ihere] += ngrad;
}
}
}
}
}
}
if (Mask != NULL) (*env)->ReleasePrimitiveArrayCritical(env, jMask, Mask, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jtexp, texp, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jvexp, vexp, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jlrate, lrate, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jSumsq, Sumsq, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jMM, MM, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jBjc, Bjc, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jBir, Bir, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jBdata, Bdata, 0);
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* JNI entry: tiled variant of multADAGrad. A is addressed inside a larger
 * matrix with leading dimension lda at row offset y; B row indices are shifted
 * by the tile's column offset x and clipped to [0, ncols).
 * FIX: two code paths indexed A with `i*nrows` instead of `i*lda`
 * (general path and the biasv fast path), inconsistent with every other A
 * access in this function (A[y+k+i*lda]); whenever lda != nrows they read the
 * wrong elements. Both now use lda.
 * See multADAGrad for the update formula; the same hogwild caveat applies. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_multADAGradTile
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint y, jint x, jint nnz, jfloatArray jA, jint lda, jfloatArray jBdata, jintArray jBir, jintArray jBjc,
jfloatArray jMM, jfloatArray jSumsq, jfloatArray jMask, int maskrows, jfloatArray jlrate, jint lrlen,
jfloatArray jvexp, jint vexplen, jfloatArray jtexp, jint texplen, jfloat istep, jint addgrad, jfloat epsilon, jint biasv, jint nbr)
{
  float * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
  float * Bdata = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jBdata, JNI_FALSE));
  int * Bir = (jint *)((*env)->GetPrimitiveArrayCritical(env, jBir, JNI_FALSE));
  int * Bjc = (jint *)((*env)->GetPrimitiveArrayCritical(env, jBjc, JNI_FALSE));
  float * MM = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jMM, JNI_FALSE));
  float * Sumsq = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jSumsq, JNI_FALSE));
  float * lrate = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jlrate, JNI_FALSE));
  float * vexp = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jvexp, JNI_FALSE));
  float * texp = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jtexp, JNI_FALSE));
  float * Mask = NULL;
  int i;
  int ioff = Bjc[0];          /* column-pointer base (handles 1-based CSC) */
  if (jMask != NULL) Mask = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jMask, JNI_FALSE));
#pragma omp parallel for
  for (i = 0; i < ncols; i++) {
    int jstart = Bjc[i] - ioff;
    int jend = Bjc[i+1] - ioff;
    int j;
    if (Mask != NULL || lrlen > 1 || vexplen > 1 || texplen > 1) {
      /* general path: per-row hyperparameters (offset by y) and/or masking */
      for (j = jstart; j < jend ; j++) {
        float bval = Bdata[j];
        int ival = Bir[j] - ioff - x;      /* shift into tile coordinates */
        if (ival >= 0 && ival < ncols) {   /* skip rows outside the tile */
          int k;
          for (k = 0; k < nrows; k++) {
            float lr, ve, te, pve, ste, ngrad;
            float grad = A[y+k+i*lda]*bval;
            int ihere = k+ival*nrows;
            Sumsq[ihere] += grad*grad + epsilon;
            if (addgrad) {
              lr = (lrlen > 1) ? lrate[k+y] : lrate[0];
              ve = (vexplen > 1) ? vexp[k+y] : vexp[0];
              te = (texplen > 1) ? texp[k+y] : texp[0];
              pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
              ste = pow(istep, te);
              ngrad = grad * lr * ste / pve;
              MM[ihere] += ngrad;
            }
            if (Mask != NULL) {
              if (maskrows > 1) {
                if (Mask[ihere] == 0) MM[ihere] = 0;
              } else {
                if (Mask[ival] == 0) MM[ihere] = 0;
              }
            }
          }
        }
      }
      if (biasv > 0) {
        /* bias column nbr: implicit bval = 1, no tile clipping */
        int ival = nbr;
        int k;
        for (k = 0; k < nrows; k++) {
          float lr, ve, te, pve, ste, ngrad;
          float grad = A[k+y+i*lda];
          int ihere = k+ival*nrows;
          Sumsq[ihere] += grad*grad + epsilon;
          if (addgrad) {
            lr = (lrlen > 1) ? lrate[k+y] : lrate[0];
            ve = (vexplen > 1) ? vexp[k+y] : vexp[0];
            te = (texplen > 1) ? texp[k+y] : texp[0];
            pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
            ste = pow(istep, te);
            ngrad = grad * lr * ste / pve;
            MM[ihere] += ngrad;
          }
          if (Mask != NULL) {
            if (maskrows > 1) {
              if (Mask[ihere] == 0) MM[ihere] = 0;
            } else {
              if (Mask[ival] == 0) MM[ihere] = 0;
            }
          }
        }
      }
    } else {
      /* fast path: scalar hyperparameters, no mask */
      float lr, ve, te, pve, ste, ngrad;
      lr = lrate[0];
      ve = vexp[0];
      te = texp[0];
      for (j = jstart; j < jend ; j++) {
        float bval = Bdata[j];
        int ival = Bir[j] - ioff - x;
        if (ival >= 0 && ival < ncols) {
          int k;
          if (addgrad && ve == 0.5f && te == 0.5f) {
            for (k = 0; k < nrows; k++) {
              float grad = A[k+y+i*lda]*bval;
              int ihere = k+ival*nrows;
              Sumsq[ihere] += grad*grad + epsilon;
              pve = sqrt(Sumsq[ihere]);
              ngrad = grad * lr / pve;
              MM[ihere] += ngrad;
            }
          } else {
            for (k = 0; k < nrows; k++) {
              float grad = A[k+y+i*lda]*bval;   /* FIX: was i*nrows */
              int ihere = k+ival*nrows;
              Sumsq[ihere] += grad*grad + epsilon;
              if (addgrad) {
                pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
                ste = pow(istep, te);
                ngrad = grad * lr * ste / pve;
                MM[ihere] += ngrad;
              }
            }
          }
        }
      }
      if (biasv > 0) {
        int ival = nbr;
        int k;
        if (addgrad && ve == 0.5f && te == 0.5f) {
          for (k = 0; k < nrows; k++) {
            float grad = A[k+y+i*lda];
            int ihere = k+ival*nrows;
            Sumsq[ihere] += grad*grad + epsilon;
            pve = sqrt(Sumsq[ihere]);
            ngrad = grad * lr / pve;
            MM[ihere] += ngrad;
          }
        } else {
          for (k = 0; k < nrows; k++) {
            float grad = A[k+y+i*lda];          /* FIX: was i*nrows */
            int ihere = k+ival*nrows;
            Sumsq[ihere] += grad*grad + epsilon;
            if (addgrad) {
              pve = (ve == 0) ? 1.0f : pow(Sumsq[ihere] * istep, ve);
              ste = pow(istep, te);
              ngrad = grad * lr * ste / pve;
              MM[ihere] += ngrad;
            }
          }
        }
      }
    }
  }
  if (Mask != NULL) (*env)->ReleasePrimitiveArrayCritical(env, jMask, Mask, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jtexp, texp, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jvexp, vexp, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jlrate, lrate, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jSumsq, Sumsq, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jMM, MM, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jBjc, Bjc, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jBir, Bir, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jBdata, Bdata, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
/* Combine a pair of non-negative ranks (r1x, r2x) into a single long long code,
 * used below to index pair-feature slots. Works on r = rank+1 so the values
 * are >= 1. The float casts read the IEEE-754 exponent field to obtain
 * nbits = floor(log2(r)) + 1, the bit length of r, without a loop.
 * The packed code stores r1, the low (nbits2-1) bits of r2, and nbits2-1
 * (so the split point can be recovered), shifted by a length-derived amount;
 * the final -2 (clamped at 0) makes the smallest pair map to code 0.
 * NOTE(review): the (*(int *)(&loc)) casts are type punning and formally
 * violate strict aliasing - presumably tolerated by this project's build
 * flags; memcpy/bit_cast would be the portable form. Confirm before enabling
 * aggressive optimization. */
__forceinline long long __pairembed(long long r1x, int r2x) {
long long r1 = r1x+1;
int r2 = r2x+1;
float loc1 = (float) r1;
float loc2 = (float) r2;
int nbits1 = ((*(int *)(&loc1)) >> 23) - 126;  /* bit length of r1 via float exponent */
int nbits2 = ((*(int *)(&loc2)) >> 23) - 126;  /* bit length of r2 */
int len = nbits1 + nbits2 - 2;
float loc3 = (float) len;
int lenbits = 0;
long long x;
if (len > 1) lenbits = ((*(int *)(&loc3)) >> 23) - 127;  /* floor(log2(len)) */
r2 = r2 & ((1 << (nbits2-1)) - 1);             /* drop r2's leading 1 bit */
x = (((r1 << (nbits2-1)) | r2) << lenbits) | (nbits2-1);  /* pack r1 | r2-tail | split code */
return (x-2) >= 0 ? (x-2) : 0;                 /* rebase so the minimum code is 0 */
}
/* Apply one ADAGrad-style update for a single gradient element.
 * Unlike multADAGrad above, Sumsq[ihere] here tracks a running
 * root-sum-of-squares, updated via hypotf(grad, old) (numerically stable).
 * When addgrad is set, adds grad * lr * istep^te / pve into MM[ihere], where
 * pve = (Sumsq * sqrt(istep))^(2*ve) with ve == 0.5 short-circuited.
 * Mask (optional) zeroes the entry per-element (maskrows > 1, index ihere)
 * or per-column (index jhere).
 * i indexes the per-row hyperparameter vectors lrate/vexp/texp when their
 * lengths exceed 1.
 * NOTE(review): the epsilon parameter is accepted but never used here,
 * unlike the other ADAGrad kernels in this file - confirm intended. */
__forceinline void __gupdate(float grad, int i, int ihere, int jhere, float *MM, float *Sumsq, float *Mask, int maskrows, float *lrate, int lrlen,
float *vexp, int vexplen, float *texp, int texplen, float istep, int addgrad, float epsilon) {
float lr, ve, te, pve, ste, ngrad, ssq, ssqnew;
ssq = Sumsq[ihere];
ssqnew = hypotf(grad,ssq);          /* sqrt(ssq^2 + grad^2) without overflow */
Sumsq[ihere] += ssqnew - ssq;       /* store the new running RMS */
ssq = ssqnew * sqrtf(istep);
if (addgrad) {
lr = (lrlen > 1) ? lrate[i] : lrate[0];
ve = (vexplen > 1) ? vexp[i] : vexp[0];
te = (texplen > 1) ? texp[i] : texp[0];
pve = (ve == 0.5f) ? ssq : ((ve == 0) ? 1.0f : pow(ssq, 2*ve));  /* common ve = 0.5 fast path */
ste = pow(istep, te);
ngrad = grad * lr * ste / pve;
MM[ihere] += ngrad;
}
if (Mask != NULL) {
if (maskrows > 1) {
if (Mask[ihere] == 0) MM[ihere] = 0;   /* per-element mask */
} else {
if (Mask[jhere] == 0) MM[ihere] = 0;   /* per-column mask */
}
}
}
/* JNI entry: ADAGrad update over single features and feature PAIRS.
 * For each sample column i of the CSC matrix B, every in-bound feature r1
 * gets a singleton update (even MM slots, via rank r1), and every ordered
 * pair (r1, r2) with r2 earlier in the column gets a pair update (odd MM
 * slots, via __pairembed(r1, r2)), using gradients A[:, i+acoff] scaled by
 * the feature value(s). __gupdate performs the per-element ADAGrad step.
 * FIX: the feature loops indexed Bdata/Bir with `jstart + j1` (and
 * `jstart + j2`) although j1/j2 already start at jstart, double-counting the
 * column start and reading past the column's nonzeros whenever jstart > 0.
 * They now index with j1/j2 directly, matching multADAGrad's use of Bdata[j].
 * Unused local `doit` removed.
 * NOTE(review): same hogwild caveat as the other kernels - parallel columns
 * may update the same rank slots unsynchronized. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CPUMACH_pairMultADAGradTile
(JNIEnv *env, jobject obj, jint nrows, jint ncols, jint bound1, jint bound2, jfloatArray jA, jint lda, jint aroff, jint acoff,
jfloatArray jBdata, jintArray jBir, jintArray jBjc, jint broff, jint bcoff,
jfloatArray jMM, jint ldmm, jfloatArray jSumsq, jfloatArray jMask, int maskrows, jfloatArray jlrate, jint lrlen,
jfloatArray jvexp, jint vexplen, jfloatArray jtexp, jint texplen, jfloat istep, jint addgrad, jfloat epsilon, jint biasv, jint nbr)
{
  float * A = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE));
  float * Bdata = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jBdata, JNI_FALSE));
  int * Bir = (jint *)((*env)->GetPrimitiveArrayCritical(env, jBir, JNI_FALSE));
  int * Bjc = (jint *)((*env)->GetPrimitiveArrayCritical(env, jBjc, JNI_FALSE));
  float * MM = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jMM, JNI_FALSE));
  float * Sumsq = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jSumsq, JNI_FALSE));
  float * lrate = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jlrate, JNI_FALSE));
  float * vexp = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jvexp, JNI_FALSE));
  float * texp = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jtexp, JNI_FALSE));
  float * Mask = NULL;
  int i;
  int ioff = Bjc[0];          /* column-pointer base (handles 1-based CSC) */
  if (jMask != NULL) Mask = (jfloat *)((*env)->GetPrimitiveArrayCritical(env, jMask, JNI_FALSE));
#pragma omp parallel for
  for (i = 0; i < ncols; i++) {
    int jstart = Bjc[i+bcoff] - ioff;
    int jend = Bjc[i+1+bcoff] - ioff;
    int j1, j2, k, ihere, jhere, ithere, jthere, r1, r2;
    float grad, f1, f2, prod;
    long long rank;
    for (j1 = jstart; j1 < jend ; j1++) {
      f1 = Bdata[j1];                     /* the feature (FIX: was Bdata[jstart + j1]) */
      r1 = Bir[j1]-broff-ioff;            /* its row index  (FIX: was Bir[jstart + j1]) */
      rank = r1;
      if (r1 >= 0 && r1 < bound1) {
        /* singleton update: even-numbered MM slot for rank r1 */
        for (k = 0; k < nrows; k++) {
          ihere = k + aroff + lda * (i + acoff);
          jhere = k + aroff;
          ithere = k + 2 * ldmm * rank;
          jthere = 2 * rank;
          grad = A[ihere] * f1;           /* raw gradient */
          __gupdate(grad, jhere, ithere, jthere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
        }
        /* pair updates against all earlier features in this column */
        for (j2 = jstart; j2 < j1 ; j2++) {
          f2 = Bdata[j2];                 /* the other feature (FIX: was Bdata[jstart + j2]) */
          r2 = Bir[j2]-broff-ioff;        /* (FIX: was Bir[jstart + j2]) */
          if (r2 >= 0) {
            rank = __pairembed(r1, r2);
            if (rank < bound2) {
              prod = f1 * f2;
              for (k = 0; k < nrows; k++) {
                ihere = k + aroff + lda * (i + acoff);
                jhere = k + aroff;
                ithere = ldmm + k + 2 * ldmm * rank;   /* odd-numbered slot for pairs */
                jthere = 1 + 2 * rank;
                grad = A[ihere] * prod;   /* raw gradient */
                __gupdate(grad, jhere, ithere, jthere, MM, Sumsq, Mask, maskrows, lrate, lrlen, vexp, vexplen, texp, texplen, istep, addgrad, epsilon);
              }
            }
          }
        }
      }
    }
  }
  if (Mask != NULL) (*env)->ReleasePrimitiveArrayCritical(env, jMask, Mask, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jtexp, texp, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jvexp, vexp, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jlrate, lrate, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jSumsq, Sumsq, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jMM, MM, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jBjc, Bjc, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jBir, Bir, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jBdata, Bdata, 0);
  (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}
|
ChMatrix.h | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Alessandro Tasora, Radu Serban
// =============================================================================
#ifndef CHMATRIX_H
#define CHMATRIX_H
#include <immintrin.h>
#include "chrono/core/ChCoordsys.h"
#include "chrono/core/ChException.h"
#include "chrono/ChConfig.h"
#include "chrono/serialization/ChArchive.h"
#include "chrono/serialization/ChArchiveAsciiDump.h"
namespace chrono {
//
// FAST MACROS TO SPEEDUP CODE
//
// Unchecked fast accessors for 3x3 / 3x4 / 4x4 matrices addressed through
// SetElementN/GetElementN (linear, row-major index a*width + b).
// NOTE(review): the a/b arguments are not fully parenthesized in the index
// arithmetic, so pass simple expressions only.
#define Set33Element(a, b, val) SetElementN(((a * 3) + (b)), val)
#define Get33Element(a, b) GetElementN((a * 3) + (b))
#define Set34Element(a, b, val) SetElementN(((a * 4) + (b)), val)
#define Get34Element(a, b) GetElementN((a * 4) + (b))
// Sets all four entries of row `a` of a 3x4 matrix `ma`.
// NOTE(review): expands to four statements with no do { } while (0) wrapper -
// unsafe inside an unbraced if/else; always brace the call site.
#define Set34Row(ma, a, val0, val1, val2, val3) \
ma.SetElementN((a * 4), val0); \
ma.SetElementN((a * 4) + 1, val1); \
ma.SetElementN((a * 4) + 2, val2); \
ma.SetElementN((a * 4) + 3, val3);
#define Set44Element(a, b, val) SetElementN(((a * 4) + (b)), val)
#define Get44Element(a, b) GetElementN((a * 4) + (b))
// forward declaration
template <class Real = double>
class ChMatrixDynamic;
///
/// ChMatrix:
///
/// A base class for matrix objects (tables of NxM numbers).
/// To access elements, the indexes start from zero, and
/// you must indicate first row, then column, that is: m(2,4)
/// means the element at 3rd row, 5th column.
/// This is an abstract class, so you cannot instantiate
/// objects from it: you must rather create matrices using the
/// specialized child classes like ChMatrixDynamic, ChMatrixNM,
/// ChMatrix33 and so on; all of them have this same base class.
/// Warning: for optimization reasons, not all functions will
/// check about boundaries of element indexes and matrix sizes (in
/// some cases, if sizes are wrong, debug asserts are used).
///
/// Further info at the @ref mathematical_objects manual page.
template <class Real = double>
class ChMatrix {
protected:
//
// DATA
//
int rows;
int columns;
Real* address;
public:
//
// CONSTRUCTORS (none - abstract class that must be implemented with child classes)
//
virtual ~ChMatrix() {}
//
// OPERATORS OVERLOADING
//
/// Parenthesis () operator, to access a single element of the matrix, by
/// supplying the row and the column (indexes start from 0).
/// For example: m(3,5) gets the element at the 4th row, 6th column.
/// Value is returned by reference, so it can be modified, like in m(1,2)=10.
Real& operator()(const int row, const int col) {
assert(row >= 0 && col >= 0 && row < rows && col < columns);
return (*(address + col + (row * columns)));
}
const Real& operator()(const int row, const int col) const {
assert(row >= 0 && col >= 0 && row < rows && col < columns);
return (*(address + col + (row * columns)));
}
/// Parenthesis () operator, to access a single element of the matrix, by
/// supplying the ordinal of the element (indexes start from 0).
/// For example: m(3) gets the 4th element, counting row by row.
/// Mostly useful if the matrix is Nx1 sized (i.e. a N-element vector).
/// Value is returned by reference, so it can be modified, like in m(1,2)=10.
Real& operator()(const int el) {
assert(el >= 0 && el < rows * columns);
return (*(address + el));
}
const Real& operator()(const int el) const {
assert(el >= 0 && el < rows * columns);
return (*(address + el));
}
/// The [] operator returns the address of the n-th row. This is mostly
/// for compatibility with old matrix programming styles (2d array-like)
/// where to access an element at row i, column j, one can write mymatrix[i][j].
Real* operator[](const int row) {
assert(row >= 0 && row < rows);
return ((address + (row * columns)));
}
const Real* operator[](const int row) const {
assert(row >= 0 && row < rows);
return ((address + (row * columns)));
}
/// Multiplies this matrix by a factor, in place
ChMatrix<Real>& operator*=(const Real factor) {
MatrScale(factor);
return *this;
}
/// Increments this matrix by another matrix, in place
template <class RealB>
ChMatrix<Real>& operator+=(const ChMatrix<RealB>& matbis) {
MatrInc(matbis);
return *this;
}
/// Decrements this matrix by another matrix, in place
template <class RealB>
ChMatrix<Real>& operator-=(const ChMatrix<RealB>& matbis) {
MatrDec(matbis);
return *this;
}
/// Matrices are equal?
bool operator==(const ChMatrix<Real>& other) { return Equals(other); }
/// Matrices are not equal?
bool operator!=(const ChMatrix<Real>& other) { return !Equals(other); }
/// Assignment operator
ChMatrix<Real>& operator=(const ChMatrix<Real>& matbis) {
if (&matbis != this)
CopyFromMatrix(matbis);
return *this;
}
template <class RealB>
ChMatrix<Real>& operator=(const ChMatrix<RealB>& matbis) {
CopyFromMatrix(matbis);
return *this;
}
//
// FUNCTIONS
//
/// Sets the element at row,col position. Indexes start with zero.
void SetElement(int row, int col, Real elem) {
assert(row >= 0 && col >= 0 && row < rows && col < columns); // boundary checks
*(address + col + (row * columns)) = elem;
}
/// Gets the element at row,col position. Indexes start with zero.
/// The return value is a copy of original value. Use Element() instead if you
/// want to access directly by reference the original element.
Real GetElement(int row, int col) {
assert(row >= 0 && col >= 0 && row < rows && col < columns); // boundary checks
return (*(address + col + (row * columns)));
}
Real GetElement(int row, int col) const {
assert(row >= 0 && col >= 0 && row < rows && col < columns); // boundary checks
return (*(address + col + (row * columns)));
}
/// Sets the Nth element, counting row after row.
void SetElementN(int index, Real elem) {
assert(index >= 0 && index < (rows * columns)); // boundary checks
*(address + index) = elem;
}
/// Gets the Nth element, counting row after row.
Real GetElementN(int index) {
assert(index >= 0 && index < (rows * columns));
return (*(address + index));
}
const Real GetElementN(int index) const {
assert(index >= 0 && index < (rows * columns));
return (*(address + index));
}
/// Access a single element of the matrix, by
/// supplying the row and the column (indexes start from 0).
/// Value is returned by reference, so it can be modified, like in m.Element(1,2)=10.
Real& Element(int row, int col) {
assert(row >= 0 && col >= 0 && row < rows && col < columns);
return (*(address + col + (row * columns)));
}
const Real& Element(int row, int col) const {
assert(row >= 0 && col >= 0 && row < rows && col < columns);
return (*(address + col + (row * columns)));
}
/// Access a single element of the matrix, the Nth element, counting row after row.
/// Value is returned by reference, so it can be modified, like in m.Element(5)=10.
Real& ElementN(int index) {
assert(index >= 0 && index < (rows * columns));
return (*(address + index));
}
const Real& ElementN(int index) const {
assert(index >= 0 && index < (rows * columns));
return (*(address + index));
}
/// Access directly the "Real* address" buffer. Warning! this is a low level
/// function, it should be used in rare cases, if really needed!
Real* GetAddress() { return address; }
const Real* GetAddress() const { return address; }
/// Gets the number of rows
int GetRows() const { return rows; }
/// Gets the number of columns
int GetColumns() const { return columns; }
/// Reallocate memory for a new size. VIRTUAL! Must be implemented by child classes!
virtual void Resize(int nrows, int ncols) {}
/// Swaps the columns a and b
void SwapColumns(int a, int b) {
Real temp;
for (int i = 0; i < rows; i++) {
temp = GetElement(i, a);
SetElement(i, a, GetElement(i, b));
SetElement(i, b, temp);
}
}
/// Swap the rows a and b
void SwapRows(int a, int b) {
Real temp;
for (int i = 0; i < columns; i++) {
temp = GetElement(a, i);
SetElement(a, i, GetElement(b, i));
SetElement(b, i, temp);
}
}
/// Fill the diagonal elements, given a sample.
/// Note that the matrix must already be square (no check for
/// rectangular matrices!), and the extra-diagonal elements are
/// not modified -this function does not set them to 0-
void FillDiag(Real sample) {
for (int i = 0; i < rows; ++i)
SetElement(i, i, sample);
}
/// Fill the matrix with the same value in all elements
void FillElem(Real sample) {
for (int i = 0; i < rows * columns; ++i)
SetElementN(i, sample);
}
/// Fill the matrix with random float numbers, falling within the
/// "max"/"min" range.
void FillRandom(Real max, Real min) {
for (int i = 0; i < rows * columns; ++i)
SetElementN(i, min + (Real)ChRandom() * (max - min));
}
/// Resets the matrix to zero (warning: simply sets memory to 0 bytes!)
void Reset() {
// SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
for (int i = 0; i < rows * columns; ++i)
this->address[i] = 0;
}
/// Reset to zeroes and (if needed) changes the size to have row and col
void Reset(int nrows, int ncols) {
Resize(nrows, ncols);
// SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
for (int i = 0; i < rows * columns; ++i)
this->address[i] = 0;
}
/// Reset to identity matrix (ones on diagonal, zero elsewhere)
void SetIdentity() {
Reset();
FillDiag(1.0);
}
/// Copy a matrix "matra" into this matrix. Note that
/// the destination matrix will be resized if necessary.
template <class RealB>
void CopyFromMatrix(const ChMatrix<RealB>& matra) {
Resize(matra.GetRows(), matra.GetColumns());
// ElementsCopy(address, matra.GetAddress(), rows*columns);
// memcpy (address, matra.address, (sizeof(Real) * rows * columns));
for (int i = 0; i < rows * columns; ++i)
address[i] = (Real)matra.GetAddress()[i];
}
/// Copy the transpose of matrix "matra" into this matrix. Note that
/// the destination matrix will be resized if necessary.
template <class RealB>
void CopyFromMatrixT(const ChMatrix<RealB>& matra) {
Resize(matra.GetColumns(), matra.GetRows());
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
SetElement(j, i, (Real)matra.Element(i, j));
}
/// Copy the transposed upper triangular part of "matra" in the lower triangular
/// part of this matrix. (matra must be square)
/// Note that the destination matrix will be resized if necessary.
template <class RealB> // _______ //
void CopyTUpMatrix(const ChMatrix<RealB>& matra) // \ | |\ //
{ // \ A'| ---> | \ //
Resize(matra.GetRows(), matra.GetColumns()); // \ | |this\ //
for (int i = 0; i < matra.GetRows(); i++) { // \| |______\ //
for (int j = 0; j < matra.GetRows(); j++)
SetElement(j, i, (Real)matra.GetElement(i, j));
}
}
/// Copy the transposed lower triangulat part of "matra" in the upper triangular
/// part of this matrix. (matra must be square)
/// Note that the destination matrix will be resized if necessary.
template <class RealB> // _______ //
void CopyTLwMatrix(const ChMatrix<RealB>& matra) // |\ \ | //
{ // | \ ---> \this| //
Resize(matra.GetRows(), matra.GetColumns()); // |A' \ \ | //
for (int i = 0; i < matra.GetRows(); i++) { // |______\ \| //
for (int j = 0; j < matra.GetRows(); j++)
SetElement(i, j, (Real)matra.GetElement(j, i));
}
}
//
// STREAMING
//
/// Method to allow serialization of transient data in archives.
virtual void ArchiveOUT(ChArchiveOut& marchive) {
// suggested: use versioning
marchive.VersionWrite(1);
// stream out all member data
marchive << make_ChNameValue("rows", rows);
marchive << make_ChNameValue("columns", columns);
// custom output of matrix data as array
if (ChArchiveAsciiDump* mascii = dynamic_cast<ChArchiveAsciiDump*>(&marchive)) {
// CUSTOM row x col 'intuitive' table-like log when using ChArchiveAsciiDump:
for (int i = 0; i < rows; i++) {
mascii->indent();
for (int j = 0; j < columns; j++) {
(*mascii->GetStream()) << Element(i, j);
mascii->GetStream()->operator<<(", ");
}
mascii->GetStream()->operator<<("\n");
}
} else {
// NORMAL array-based serialization:
int tot_elements = GetRows() * GetColumns();
marchive.out_array_pre("data", tot_elements, typeid(Real).name());
for (int i = 0; i < tot_elements; i++) {
marchive << CHNVP(ElementN(i), "");
marchive.out_array_between(tot_elements, typeid(Real).name());
}
marchive.out_array_end(tot_elements, typeid(Real).name());
}
}
/// Method to allow de serialization of transient data from archives.
virtual void ArchiveIN(ChArchiveIn& marchive) {
// suggested: use versioning
int version = marchive.VersionRead();
// stream in all member data
int m_row, m_col;
marchive >> make_ChNameValue("rows", m_row);
marchive >> make_ChNameValue("columns", m_col);
Reset(m_row, m_col);
// custom input of matrix data as array
size_t tot_elements = GetRows() * GetColumns();
marchive.in_array_pre("data", tot_elements);
for (int i = 0; i < tot_elements; i++) {
marchive >> CHNVP(ElementN(i));
marchive.in_array_between("data");
}
marchive.in_array_end("data");
}
/// Method to allow serializing transient data into in ascii
/// as a readable item, for example "chrono::GetLog() << myobject;"
/// ***OBSOLETE***
void StreamOUT(ChStreamOutAscii& mstream) {
mstream << "\n"
<< "Matrix " << GetRows() << " rows, " << GetColumns() << " columns."
<< "\n";
for (int i = 0; i < ChMin(GetRows(), 8); i++) {
for (int j = 0; j < ChMin(GetColumns(), 8); j++)
mstream << GetElement(i, j) << " ";
if (GetColumns() > 8)
mstream << "...";
mstream << "\n";
}
if (GetRows() > 8)
mstream << "... \n\n";
}
/// Method to allow serializing transient data into an ascii stream (ex. a file)
/// as a Matlab .dat file (all numbers in a row, separated by space, then CR)
void StreamOUTdenseMatlabFormat(ChStreamOutAscii& mstream) {
for (int ii = 0; ii < this->GetRows(); ii++) {
for (int jj = 0; jj < this->GetColumns(); jj++) {
mstream << this->GetElement(ii, jj);
if (jj < (this->GetColumns() - 1))
mstream << " ";
}
mstream << "\n";
}
}
//
// MATH MEMBER FUNCTIONS.
// For speed reasons, sometimes size checking of operands is left to the user!
//
/// Changes the sign of all the elements of this matrix, in place.
void MatrNeg() {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) = -ElementN(nel);
}
/// Sum two matrices, and stores the result in "this" matrix: [this]=[A]+[B].
template <class RealB, class RealC>
void MatrAdd(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetColumns() && matra.rows == matrb.GetRows());
assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) = (Real)(matra.ElementN(nel) + matrb.ElementN(nel));
}
/// Subtract two matrices, and stores the result in "this" matrix: [this]=[A]-[B].
template <class RealB, class RealC>
void MatrSub(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetColumns() && matra.rows == matrb.GetRows());
assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) = (Real)(matra.ElementN(nel) - matrb.ElementN(nel));
}
/// Increments this matrix with another matrix A, as: [this]+=[A]
template <class RealB>
void MatrInc(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) += (Real)matra.ElementN(nel);
}
/// Decrements this matrix with another matrix A, as: [this]-=[A]
template <class RealB>
void MatrDec(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) -= (Real)matra.ElementN(nel);
}
/// Scales a matrix, multiplying all elements by a constant value: [this]*=f
void MatrScale(Real factor) {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) *= factor;
}
/// Scales a matrix, multiplying all element by all oter elements of
/// matra (it is not the classical matrix multiplication!)
template <class RealB>
void MatrScale(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) *= (Real)matra.ElementN(nel);
}
/// Scales a matrix, dividing all elements by a constant value: [this]/=f
void MatrDivScale(Real factor) {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) /= factor;
}
/// Scales a matrix, dividing all element by all oter elements of
/// matra (it is not the classical matrix multiplication!)
template <class RealB>
void MatrDivScale(const ChMatrix<RealB>& matra) {
assert(matra.GetColumns() == columns && matra.GetRows() == rows);
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) /= (Real)matra.ElementN(nel);
}
/// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
template <class RealB, class RealC>
void MatrMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetRows());
assert(this->rows == matra.GetRows());
assert(this->columns == matrb.GetColumns());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetColumns(); ++colres) {
for (row = 0; row < matra.GetRows(); ++row) {
sum = 0;
for (col = 0; col < matra.GetColumns(); ++col)
sum += (Real)(matra.Element(row, col) * matrb.Element(col, colres));
SetElement(row, colres, sum);
}
}
}
#ifdef CHRONO_HAS_AVX
/// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
/// AVX implementation: The speed up is marginal if size of the matrices are small, e.g. 3*3
/// Generally, as the matra.GetColumns() increases the method performs better
void MatrMultiplyAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
assert(matra.GetColumns() == matrb.GetRows());
assert(this->rows == matra.GetRows());
assert(this->columns == matrb.GetColumns());
int A_Nrow = matra.GetRows();
int B_Nrow = matrb.GetRows();
int A_NCol = matra.GetColumns();
int B_NCol = matrb.GetColumns();
const double* A_add = matra.GetAddress();
const double* B_add = matrb.GetAddress();
double* this_Add = this->GetAddress();
for (int rowA = 0; rowA < A_Nrow; rowA++) {
for (int colB = 0; colB < B_NCol; colB += 4) {
__m256d sum = _mm256_setzero_pd();
for (int elem = 0; elem < A_NCol; elem++) {
__m256d ymmA = _mm256_broadcast_sd(A_add + A_NCol * rowA + elem);
__m256d ymmB = _mm256_loadu_pd(B_add + elem * B_NCol + colB);
__m256d prod = _mm256_mul_pd(ymmA, ymmB);
sum = _mm256_add_pd(sum, prod);
}
_mm256_storeu_pd(this_Add + rowA * B_NCol + colB, sum);
}
}
}
/// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
/// Note: This method is faster than MatrMultiplyT if matra.GetColumns()%4=0 && matra.GetColumns()>8
/// It is still fast if matra.GetColumns() is large enough even if matra.GetColumns()%4!=0
void MatrMultiplyTAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
assert(matra.GetColumns() == matrb.GetColumns());
assert(this->GetRows() == matra.GetRows());
assert(this->GetColumns() == matrb.GetRows());
int A_Nrow = matra.GetRows();
int B_Nrow = matrb.GetRows();
int A_NCol = matra.GetColumns();
int B_NCol = matrb.GetColumns();
const double* A_add = matra.GetAddress();
const double* B_add = matrb.GetAddress();
bool NeedsPadding = (B_NCol % 4 != 0);
int CorrectFAT = ((B_NCol >> 2) << 2);
for (int rowA = 0; rowA < A_Nrow; rowA++) {
for (int rowB = 0; rowB < B_Nrow; rowB++) {
int colB;
double temp_sum = 0.0;
__m256d sum = _mm256_setzero_pd();
for (colB = 0; colB < CorrectFAT; colB += 4) {
__m256d ymmA = _mm256_loadu_pd(A_add + rowA * A_NCol + colB);
__m256d ymmB = _mm256_loadu_pd(B_add + rowB * B_NCol + colB);
__m256d prod = _mm256_mul_pd(ymmA, ymmB);
sum = _mm256_add_pd(sum, prod);
}
sum = _mm256_hadd_pd(sum, sum);
temp_sum = ((double*)&sum)[0] + ((double*)&sum)[2];
if (NeedsPadding)
for (colB = CorrectFAT; colB < B_NCol; colB++) {
temp_sum += (matra.Element(rowA, colB) * matrb.Element(rowB, colB));
}
SetElement(rowA, rowB, temp_sum);
}
}
}
#endif
/// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
/// Faster than doing B.MatrTranspose(); result.MatrMultiply(A,B);
/// Note: no check on mistaken size of this!
template <class RealB, class RealC>
void MatrMultiplyT(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetColumns());
assert(this->rows == matra.GetRows());
assert(this->columns == matrb.GetRows());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetRows(); ++colres) {
for (row = 0; row < matra.GetRows(); ++row) {
sum = 0;
for (col = 0; col < matra.GetColumns(); ++col)
sum += (Real)(matra.Element(row, col) * matrb.Element(colres, col));
SetElement(row, colres, sum);
}
}
}
/// Multiplies two matrices (the first is considered transposed): [this]=[A]'*[B]
/// Faster than doing A.MatrTranspose(); result.MatrMultiply(A,B);
template <class RealB, class RealC>
void MatrTMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetRows() == matrb.GetRows());
assert(this->rows == matra.GetColumns());
assert(this->columns == matrb.GetColumns());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetColumns(); ++colres) {
for (row = 0; row < matra.GetColumns(); ++row) {
sum = 0;
for (col = 0; col < (matra.GetRows()); ++col)
sum += (Real)(matra.Element(col, row) * matrb.Element(col, colres));
SetElement(row, colres, sum);
}
}
}
/// Computes dot product between two column-matrices (vectors) with
/// same size. Returns a scalar value.
template <class RealB, class RealC>
static Real MatrDot(const ChMatrix<RealB>& ma, const ChMatrix<RealC>& mb) {
assert(ma.GetColumns() == mb.GetColumns() && ma.GetRows() == mb.GetRows());
Real tot = 0;
for (int i = 0; i < ma.GetRows(); ++i)
tot += (Real)(ma.ElementN(i) * mb.ElementN(i));
return tot;
}
/// Transpose this matrix in place
void MatrTranspose() {
if (columns == rows) // Square transp.is optimized
{
for (int row = 0; row < rows; ++row)
for (int col = row; col < columns; ++col)
if (row != col) {
Real temp = Element(row, col);
Element(row, col) = Element(col, row);
Element(col, row) = temp;
}
int tmpr = rows;
rows = columns;
columns = tmpr;
} else // Naive implementation for rectangular case. Not in-place. Slower.
{
ChMatrixDynamic<Real> matrcopy(*this);
int tmpr = rows;
rows = columns;
columns = tmpr; // dont' realloc buffer, anyway
for (int row = 0; row < rows; ++row)
for (int col = 0; col < columns; ++col)
Element(row, col) = matrcopy.Element(col, row);
}
}
/// Returns the determinant of the matrix.
/// Note! This method must be used only with max 4x4 matrices,
/// otherwise it throws an exception.
Real Det() {
assert(this->GetRows() == this->GetColumns());
assert(this->GetRows() <= 4);
if (this->GetRows() != this->GetColumns())
throw("Cannot compute matrix determinant because rectangular matrix");
if (this->GetRows() > 4)
throw("Cannot compute matrix determinant because matr. larger than 3x3");
Real det = 0;
switch (this->GetRows()) {
case 1:
det = (*this)(0, 0);
break;
case 2:
det = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
break;
case 3:
det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) +
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(2, 0) * (*this)(1, 1) * (*this)(0, 2) -
(*this)(2, 1) * (*this)(1, 2) * (*this)(0, 0) - (*this)(2, 2) * (*this)(1, 0) * (*this)(0, 1);
break;
case 4:
det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3) +
(*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) +
(*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) +
(*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
(*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) +
(*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) +
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3) +
(*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) +
(*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) +
(*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
(*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) +
(*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) -
(*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
(*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) -
(*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) -
(*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3) -
(*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
(*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) -
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
(*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) -
(*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) -
(*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2) -
(*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
(*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1);
break;
}
return det;
}
/// Returns the inverse of the matrix.
/// Note! This method must be used only with max 4x4 matrices,
/// otherwise it throws an exception.
void MatrInverse() {
assert(this->GetRows() == this->GetColumns());
assert(this->GetRows() <= 4);
assert(this->Det() != 0);
if (this->GetRows() != this->GetColumns())
throw("Cannot compute matrix inverse because rectangular matrix");
if (this->GetRows() > 4)
throw("Cannot compute matrix inverse because matr. larger than 4x4");
if (this->Det() == 0)
throw("Cannot compute matrix inverse because singular matrix");
switch (this->GetRows()) {
case 1:
(*this)(0, 0) = (1 / (*this)(0, 0));
break;
case 2: {
ChMatrixDynamic<Real> inv(2, 2);
inv(0, 0) = (*this)(1, 1);
inv(0, 1) = -(*this)(0, 1);
inv(1, 1) = (*this)(0, 0);
inv(1, 0) = -(*this)(1, 0);
inv.MatrDivScale(this->Det());
this->CopyFromMatrix(inv);
break;
}
case 3: {
ChMatrixDynamic<Real> inv(3, 3);
inv(0, 0) = (*this)(1, 1) * (*this)(2, 2) - (*this)(1, 2) * (*this)(2, 1);
inv(0, 1) = (*this)(2, 1) * (*this)(0, 2) - (*this)(0, 1) * (*this)(2, 2);
inv(0, 2) = (*this)(0, 1) * (*this)(1, 2) - (*this)(0, 2) * (*this)(1, 1);
inv(1, 0) = (*this)(1, 2) * (*this)(2, 0) - (*this)(1, 0) * (*this)(2, 2);
inv(1, 1) = (*this)(2, 2) * (*this)(0, 0) - (*this)(2, 0) * (*this)(0, 2);
inv(1, 2) = (*this)(0, 2) * (*this)(1, 0) - (*this)(1, 2) * (*this)(0, 0);
inv(2, 0) = (*this)(1, 0) * (*this)(2, 1) - (*this)(1, 1) * (*this)(2, 0);
inv(2, 1) = (*this)(0, 1) * (*this)(2, 0) - (*this)(0, 0) * (*this)(2, 1);
inv(2, 2) = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
inv.MatrDivScale(this->Det());
this->CopyFromMatrix(inv);
break;
}
case 4: {
ChMatrixDynamic<Real> inv(4, 4);
inv.SetElement(
0, 0,
(*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) - (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) +
(*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) - (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
(*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) + (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3));
inv.SetElement(
0, 1,
(*this)(0, 3) * (*this)(2, 2) * (*this)(3, 1) - (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 1) -
(*this)(0, 3) * (*this)(2, 1) * (*this)(3, 2) + (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 2) +
(*this)(0, 2) * (*this)(2, 1) * (*this)(3, 3) - (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 3));
inv.SetElement(
0, 2,
(*this)(0, 2) * (*this)(1, 3) * (*this)(3, 1) - (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 1) +
(*this)(0, 3) * (*this)(1, 1) * (*this)(3, 2) - (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 2) -
(*this)(0, 2) * (*this)(1, 1) * (*this)(3, 3) + (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 3));
inv.SetElement(
0, 3,
(*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) - (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) -
(*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) +
(*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) - (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3));
inv.SetElement(
1, 0,
(*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) - (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
(*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) + (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
(*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) - (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3));
inv.SetElement(
1, 1,
(*this)(0, 2) * (*this)(2, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 0) +
(*this)(0, 3) * (*this)(2, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 2) -
(*this)(0, 2) * (*this)(2, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 3));
inv.SetElement(
1, 2,
(*this)(0, 3) * (*this)(1, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 0) -
(*this)(0, 3) * (*this)(1, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 2) +
(*this)(0, 2) * (*this)(1, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 3));
inv.SetElement(
1, 3,
(*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) - (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) +
(*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) - (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) -
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) + (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3));
inv.SetElement(
2, 0,
(*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) - (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) +
(*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) - (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
(*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) + (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3));
inv.SetElement(
2, 1,
(*this)(0, 3) * (*this)(2, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 0) -
(*this)(0, 3) * (*this)(2, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 1) +
(*this)(0, 1) * (*this)(2, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 3));
inv.SetElement(
2, 2,
(*this)(0, 1) * (*this)(1, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 0) +
(*this)(0, 3) * (*this)(1, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 1) -
(*this)(0, 1) * (*this)(1, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 3));
inv.SetElement(
2, 3,
(*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) - (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) -
(*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) + (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) +
(*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) - (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3));
inv.SetElement(
3, 0,
(*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) - (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
(*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1) + (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
(*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) - (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2));
inv.SetElement(
3, 1,
(*this)(0, 1) * (*this)(2, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 0) +
(*this)(0, 2) * (*this)(2, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 1) -
(*this)(0, 1) * (*this)(2, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 2));
inv.SetElement(
3, 2,
(*this)(0, 2) * (*this)(1, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 0) -
(*this)(0, 2) * (*this)(1, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 1) +
(*this)(0, 1) * (*this)(1, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 2));
inv.SetElement(
3, 3,
(*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) - (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) +
(*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) -
(*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) + (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2));
inv.MatrDivScale(this->Det());
this->CopyFromMatrix(inv);
break;
}
}
}
/// Returns true if vector is identical to other matrix
bool Equals(const ChMatrix<Real>& other) { return Equals(other, 0.0); }
/// Returns true if vector equals another vector, within a tolerance 'tol'
bool Equals(const ChMatrix<Real>& other, Real tol) {
if ((other.GetColumns() != this->columns) || (other.GetRows() != this->rows))
return false;
for (int nel = 0; nel < rows * columns; ++nel)
if (fabs(ElementN(nel) - other.ElementN(nel)) > tol)
return false;
return true;
}
/// Multiplies this 3x4 matrix by a quaternion, as v=[G]*q
/// The matrix must be 3x4.
/// \return The result of the multiplication, i.e. a vector.
template <class RealB>
ChVector<Real> Matr34_x_Quat(const ChQuaternion<RealB>& qua) {
assert((rows == 3) && (columns == 4));
return ChVector<Real>(Get34Element(0, 0) * (Real)qua.e0() + Get34Element(0, 1) * (Real)qua.e1() +
Get34Element(0, 2) * (Real)qua.e2() + Get34Element(0, 3) * (Real)qua.e3(),
Get34Element(1, 0) * (Real)qua.e0() + Get34Element(1, 1) * (Real)qua.e1() +
Get34Element(1, 2) * (Real)qua.e2() + Get34Element(1, 3) * (Real)qua.e3(),
Get34Element(2, 0) * (Real)qua.e0() + Get34Element(2, 1) * (Real)qua.e1() +
Get34Element(2, 2) * (Real)qua.e2() + Get34Element(2, 3) * (Real)qua.e3());
}
/// Multiplies this 3x4 matrix (transposed) by a vector, as q=[G]'*v
/// The matrix must be 3x4.
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB>
ChQuaternion<Real> Matr34T_x_Vect(const ChVector<RealB>& va) {
assert((rows == 3) && (columns == 4));
return ChQuaternion<Real>(
Get34Element(0, 0) * (Real)va.x() + Get34Element(1, 0) * (Real)va.y() + Get34Element(2, 0) * (Real)va.z(),
Get34Element(0, 1) * (Real)va.x() + Get34Element(1, 1) * (Real)va.y() + Get34Element(2, 1) * (Real)va.z(),
Get34Element(0, 2) * (Real)va.x() + Get34Element(1, 2) * (Real)va.y() + Get34Element(2, 2) * (Real)va.z(),
Get34Element(0, 3) * (Real)va.x() + Get34Element(1, 3) * (Real)va.y() + Get34Element(2, 3) * (Real)va.z());
}
/// Multiplies this 4x4 matrix by a quaternion, as q_out=[A]*q.
/// (Note: no transposition happens here — each ROW of the matrix is
/// dotted with the quaternion, as the indexing below shows.)
/// The matrix must be 4x4.
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB>
ChQuaternion<Real> Matr44_x_Quat(const ChQuaternion<RealB>& qua) {
    assert((rows == 4) && (columns == 4));
    const Real q0 = (Real)qua.e0();
    const Real q1 = (Real)qua.e1();
    const Real q2 = (Real)qua.e2();
    const Real q3 = (Real)qua.e3();
    Real out[4];
    for (int r = 0; r < 4; ++r)
        out[r] = Get44Element(r, 0) * q0 + Get44Element(r, 1) * q1 + Get44Element(r, 2) * q2 + Get44Element(r, 3) * q3;
    return ChQuaternion<Real>(out[0], out[1], out[2], out[3]);
}
/// Transposes only the lower-right 3x3 submatrix of a hemisymmetric 4x4 matrix,
/// used when the 4x4 matrix is a "star" matrix [q] coming from a quaternion q:
/// the non commutative quat. product is:
/// q1 x q2 = [q1]*q2 = [q2st]*q1
/// where [q2st] is the "semi-transpose" of [q2].
void MatrXq_SemiTranspose() {
    // Negate every off-diagonal entry of the lower-right 3x3 block
    // (indices 1..3); for a hemisymmetric block that equals its transpose.
    for (int i = 1; i < 4; ++i)
        for (int j = 1; j < 4; ++j)
            if (i != j)
                SetElement(i, j, -GetElement(i, j));
}
/// Change the sign of the 2nd, 3rd and 4th columns of a 4x4 matrix,
/// The product between a quaternion q1 and the conjugate of q2 (q2'), is:
/// q1 x q2' = [q1]*q2' = [q1sn]*q2
/// where [q1sn] is the semi-negation of the 4x4 matrix [q1].
void MatrXq_SemiNeg() {
    // Negate every element except those in the first column (j == 0).
    for (int j = 1; j < columns; ++j)
        for (int i = 0; i < rows; ++i)
            SetElement(i, j, -GetElement(i, j));
}
/// Gets the infinity norm of the matrix, i.e. the maximum
/// of its elements in absolute value.
Real NormInf() {
    Real best = 0;
    const int count = rows * columns;
    for (int i = 0; i < count; ++i) {
        const Real mag = fabs(ElementN(i));  // compute |e| once per element
        if (mag > best)
            best = mag;
    }
    return best;
}
/// Gets the two-norm of the matrix, i.e. the square root
/// of the sum of the squared elements (Frobenius norm).
Real NormTwo() {
    Real acc = 0;
    const int count = rows * columns;
    for (int i = 0; i < count; ++i) {
        const Real e = ElementN(i);
        acc += e * e;
    }
    return sqrt(acc);
}
/// Finds the maximum value among the elements of the matrix.
Real Max() {
    Real best = GetElement(0, 0);  // seed with the first element
    for (int i = 0; i < rows * columns; ++i)
        if (ElementN(i) > best)
            best = ElementN(i);
    return best;
}
/// Finds the minimum value among the elements of the matrix.
Real Min() {
    Real best = GetElement(0, 0);  // seed with the first element
    for (int i = 0; i < rows * columns; ++i)
        if (ElementN(i) < best)
            best = ElementN(i);
    return best;
}
/// Linear interpolation of two matrices: [this] = (1-x)[A] + (x)[B].
/// Parameter mx must be 0...1. All three matrices must have the same size!!
void LinInterpolate(const ChMatrix<Real>& matra, const ChMatrix<Real>& matrb, Real mx) {
    assert(matra.columns == matrb.columns && matra.rows == matrb.rows);
    // Bug fix: also guard against a destination of mismatched size — the loop
    // below indexes matra/matrb with THIS matrix's element count, so a larger
    // destination caused out-of-bounds reads of the sources.
    assert(matra.columns == columns && matra.rows == rows);
    for (int nel = 0; nel < rows * columns; nel++)
        ElementN(nel) = matra.ElementN(nel) * (1 - mx) + matrb.ElementN(nel) * (mx);
}
/// Fills a matrix or a vector with a bilinear interpolation,
/// from corner values (as a u-v patch).
void RowColInterp(Real vmin, Real vmax, Real umin, Real umax) {
    for (int iu = 0; iu < GetColumns(); iu++)
        for (int iv = 0; iv < GetRows(); iv++) {
            // Bug fix: start each element from zero. The original code used
            // '+=' for the column term, so when GetRows()==1 (first branch
            // skipped) the result accumulated onto whatever stale value the
            // element already held.
            Element(iv, iu) = 0;
            if (GetRows() > 1)
                Element(iv, iu) = vmin + (vmax - vmin) * ((Real)iv / ((Real)(GetRows() - 1)));
            if (GetColumns() > 1)
                Element(iv, iu) += umin + (umax - umin) * ((Real)iu / ((Real)(GetColumns() - 1)));
        }
}
//
// BOOKKEEPING
//
/// Paste a matrix "matra" into "this", inserting at location insrow-inscol.
/// Normal copy for insrow=inscol=0
template <class RealB>
void PasteMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
Element(i + insrow, j + inscol) = (Real)matra.Element(i, j);
}
/// Paste a matrix "matra" into "this", inserting at location insrow-inscol
/// and performing a sum with the preexisting values.
template <class RealB>
void PasteSumMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
Element(i + insrow, j + inscol) += (Real)matra.Element(i, j);
}
/// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol.
/// Normal copy for insrow=inscol=0
template <class RealB>
void PasteTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
Element(j + insrow, i + inscol) = (Real)matra.Element(i, j);
}
/// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol
/// and performing a sum with the preexisting values.
template <class RealB>
void PasteSumTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
Element(j + insrow, i + inscol) += (Real)matra.Element(i, j);
}
/// Copy an nrows x ncolumns clip of "matra" (starting at cliprow, clipcol)
/// into "this", placing it at (insrow, inscol).
template <class RealB>
void PasteClippedMatrix(const ChMatrix<RealB>& matra,
                        int cliprow,
                        int clipcol,
                        int nrows,
                        int ncolumns,
                        int insrow,
                        int inscol) {
    for (int r = 0; r < nrows; ++r)
        for (int c = 0; c < ncolumns; ++c)
            Element(r + insrow, c + inscol) = (Real)matra.Element(r + cliprow, c + clipcol);
}
/// Paste a clipped portion of the matrix "matra" into "this", where "this"
/// is a vector (of ChMatrix type): the nrows x ncolumns clip is flattened
/// row by row into consecutive entries starting at insindex.
template <class RealB>
void PasteClippedMatrixToVector(const ChMatrix<RealB>& matra,
                                int cliprow,
                                int clipcol,
                                int nrows,
                                int ncolumns,
                                int insindex) {
    int dest = insindex;  // running write position in this vector
    for (int r = 0; r < nrows; ++r)
        for (int c = 0; c < ncolumns; ++c)
            ElementN(dest++) = (Real)matra.Element(cliprow + r, clipcol + c);
}
/// Paste a clipped portion of a vector into "this", where "this"
/// is a matrix (of ChMatrix type),
/// inserting the clip (of size nrows, ncolumns) at the location insindex.
/// NOTE(review): despite their names, cliprow/clipcol are used below as the
/// DESTINATION offsets into this matrix, while insindex is the read start
/// in the source vector "matra" — the exact mirror of
/// PasteClippedMatrixToVector. Confirm with callers before renaming.
template <class RealB>
void PasteClippedVectorToMatrix(const ChMatrix<RealB>& matra,
int cliprow,
int clipcol,
int nrows,
int ncolumns,
int insindex) {
for (int i = 0; i < nrows; ++i)
for (int j = 0; j < ncolumns; ++j)
Element(i + cliprow, j + clipcol) = (Real)matra.ElementN(insindex + i * ncolumns + j);
}
/// Paste a clipped portion of the matrix "matra" into "this", performing a sum with preexisting values,
/// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
/// The per-element '+=' is marked '#pragma omp atomic' so that concurrent
/// callers inside an OpenMP parallel region can accumulate into the same
/// destination without a data race; outside a parallel region the pragma
/// has no effect on the result.
template <class RealB>
void PasteSumClippedMatrix(const ChMatrix<RealB>& matra,
int cliprow,
int clipcol,
int nrows,
int ncolumns,
int insrow,
int inscol) {
for (int i = 0; i < nrows; ++i)
for (int j = 0; j < ncolumns; ++j)
#pragma omp atomic
Element(i + insrow, j + inscol) += (Real)matra.Element(i + cliprow, j + clipcol);
}
/// Paste a vector "va" into the matrix.
template <class RealB>
void PasteVector(const ChVector<RealB>& va, int insrow, int inscol) {
SetElement(insrow + 0, inscol, (Real)va.x());
SetElement(insrow + 1, inscol, (Real)va.y());
SetElement(insrow + 2, inscol, (Real)va.z());
}
/// Paste a vector "va" into the matrix, summing it with preexisting values.
template <class RealB>
void PasteSumVector(const ChVector<RealB>& va, int insrow, int inscol) {
Element(insrow + 0, inscol) += (Real)va.x();
Element(insrow + 1, inscol) += (Real)va.y();
Element(insrow + 2, inscol) += (Real)va.z();
}
/// Paste a vector "va" into the matrix, subtracting it from preexisting values.
template <class RealB>
void PasteSubVector(const ChVector<RealB>& va, int insrow, int inscol) {
Element(insrow + 0, inscol) -= (Real)va.x();
Element(insrow + 1, inscol) -= (Real)va.y();
Element(insrow + 2, inscol) -= (Real)va.z();
}
/// Paste a quaternion "qa" into the matrix, filling rows
/// insrow..insrow+3 of column inscol with (e0, e1, e2, e3).
template <class RealB>
void PasteQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
    SetElement(insrow, inscol, (Real)qa.e0());
    SetElement(insrow + 1, inscol, (Real)qa.e1());
    SetElement(insrow + 2, inscol, (Real)qa.e2());
    SetElement(insrow + 3, inscol, (Real)qa.e3());
}
/// Add a quaternion "qa" to the values already stored at rows
/// insrow..insrow+3 of column inscol.
template <class RealB>
void PasteSumQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
    Element(insrow, inscol) += (Real)qa.e0();
    Element(insrow + 1, inscol) += (Real)qa.e1();
    Element(insrow + 2, inscol) += (Real)qa.e2();
    Element(insrow + 3, inscol) += (Real)qa.e3();
}
/// Paste a coordsys "cs" into the matrix: the 3d position goes at insrow,
/// the rotation quaternion in the 4 rows right below it.
template <class RealB>
void PasteCoordsys(const ChCoordsys<RealB>& cs, int insrow, int inscol) {
    PasteVector(cs.pos, insrow, inscol);
    PasteQuaternion(cs.rot, insrow + 3, inscol);
}
/// Returns the 3d vector stored at rows insrow..insrow+2 of column inscol.
ChVector<Real> ClipVector(int insrow, int inscol) const {
    return ChVector<Real>(Element(insrow, inscol),
                          Element(insrow + 1, inscol),
                          Element(insrow + 2, inscol));
}
/// Returns the quaternion stored at rows insrow..insrow+3 of column inscol.
ChQuaternion<Real> ClipQuaternion(int insrow, int inscol) const {
    return ChQuaternion<Real>(Element(insrow, inscol),
                              Element(insrow + 1, inscol),
                              Element(insrow + 2, inscol),
                              Element(insrow + 3, inscol));
}
/// Returns the coordsys stored starting at (insrow, inscol):
/// 3 position rows followed by 4 quaternion rows.
ChCoordsys<Real> ClipCoordsys(int insrow, int inscol) const {
    return ChCoordsys<Real>(ClipVector(insrow, inscol),
                            ClipQuaternion(insrow + 3, inscol));
}
//
// MULTIBODY SPECIFIC MATH FUNCTIONS
//
/// Fills a 4x4 matrix as the "star" matrix, representing quaternion cross product.
/// That is, given two quaternions a and b, aXb= [Astar]*b
/// The resulting layout (from the assignments below) is:
///   [ e0 -e1 -e2 -e3 ]
///   [ e1  e0 -e3  e2 ]
///   [ e2  e3  e0 -e1 ]
///   [ e3 -e2  e1  e0 ]
template <class RealB>
void Set_Xq_matrix(const ChQuaternion<RealB>& q) {
Set44Element(0, 0, (Real)q.e0());
Set44Element(0, 1, -(Real)q.e1());
Set44Element(0, 2, -(Real)q.e2());
Set44Element(0, 3, -(Real)q.e3());
Set44Element(1, 0, (Real)q.e1());
Set44Element(1, 1, (Real)q.e0());
Set44Element(1, 2, -(Real)q.e3());
Set44Element(1, 3, (Real)q.e2());
Set44Element(2, 0, (Real)q.e2());
Set44Element(2, 1, (Real)q.e3());
Set44Element(2, 2, (Real)q.e0());
Set44Element(2, 3, -(Real)q.e1());
Set44Element(3, 0, (Real)q.e3());
Set44Element(3, 1, -(Real)q.e2());
Set44Element(3, 2, (Real)q.e1());
Set44Element(3, 3, (Real)q.e0());
}
};
} // end namespace chrono
#endif
|
SPOSet.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
// Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jaron T. Krogel, krogeljt@ornl.gov, Oak Ridge National Laboratory
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Ying Wai Li, yingwaili@ornl.gov, Oak Ridge National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_SINGLEPARTICLEORBITALSETBASE_H
#define QMCPLUSPLUS_SINGLEPARTICLEORBITALSETBASE_H
#include "OhmmsPETE/OhmmsArray.h"
#include "Particle/ParticleSet.h"
#include "Particle/VirtualParticleSet.h"
#include "QMCWaveFunctions/OrbitalSetTraits.h"
#include "io/hdf_archive.h"
#if !defined(ENABLE_SOA)
#include "Message/CommOperators.h"
#endif
#ifdef QMC_CUDA
#include "type_traits/CUDATypes.h"
#endif
namespace qmcplusplus
{
/** base class for Single-particle orbital sets
*
* SPOSet stands for S(ingle)P(article)O(rbital)Set which contains
* a number of single-particle orbitals with capabilities of evaluating \f$ \psi_j({\bf r}_i)\f$
*/
class SPOSet : public QMCTraits
{
public:
// Type aliases resolved through OrbitalSetTraits<ValueType>.
typedef OrbitalSetTraits<ValueType>::IndexVector_t IndexVector_t;
typedef OrbitalSetTraits<ValueType>::ValueVector_t ValueVector_t;
typedef OrbitalSetTraits<ValueType>::ValueMatrix_t ValueMatrix_t;
typedef OrbitalSetTraits<ValueType>::GradVector_t GradVector_t;
typedef OrbitalSetTraits<ValueType>::GradMatrix_t GradMatrix_t;
typedef OrbitalSetTraits<ValueType>::HessVector_t HessVector_t;
typedef OrbitalSetTraits<ValueType>::HessMatrix_t HessMatrix_t;
typedef OrbitalSetTraits<ValueType>::HessType HessType;
typedef Array<HessType, OHMMS_DIM> HessArray_t;
typedef OrbitalSetTraits<ValueType>::GradHessType GGGType;
typedef OrbitalSetTraits<ValueType>::GradHessVector_t GGGVector_t;
typedef OrbitalSetTraits<ValueType>::GradHessMatrix_t GGGMatrix_t;
typedef OrbitalSetTraits<ValueType>::VGLVector_t VGLVector_t;
typedef ParticleSet::Walker_t Walker_t;
typedef std::map<std::string, SPOSet*> SPOPool_t;
/** name of the object
*
* Several user classes can own SPOSet and use objectName as counter
*/
std::string objectName;
#if !defined(ENABLE_SOA)
///true if C is an identity matrix
bool Identity;
///if true, do not clean up
bool IsCloned;
///number of Single-particle orbitals
IndexType BasisSetSize;
/** pointer matrix containing the coefficients
*
* makeClone makes a shallow copy
*/
ValueMatrix_t* C;
///occupation number
Vector<RealType> Occ;
///Pass Communicator
Communicate* myComm;
#endif
/** constructor */
SPOSet(bool ion_deriv = false, bool optimizable = false);
/** destructor
*
* Derived class destructor needs to pay extra attention to freeing memory shared among clones of SPOSet.
*/
virtual ~SPOSet()
{
#if !defined(ENABLE_SOA)
// Only the original owner (not a shallow clone) releases the coefficient matrix.
if (!IsCloned && C != nullptr)
delete C;
#endif
}
// accessor function to Optimizable
inline bool isOptimizable() const { return Optimizable; }
/** return the size of the orbital set
* Ye: this needs to be replaced by getOrbitalSetSize();
*/
inline int size() const { return OrbitalSetSize; }
/** print basic SPOSet information
*/
void basic_report(const std::string& pad = "");
/** print SPOSet information
*/
virtual void report(const std::string& pad = "") { basic_report(pad); }
/** return the size of the orbitals
*/
inline int getOrbitalSetSize() const { return OrbitalSetSize; }
/** Query if this SPOSet has an explicit ion dependence. returns true if it does.
*/
inline bool hasIonDerivs() const { return ionDerivs; }
#if !defined(ENABLE_SOA)
int getBasisSetSize() const { return BasisSetSize; }
bool setIdentity(bool useIdentity);
void checkObject();
///get C and Occ
bool put(xmlNodePtr cur);
#else
/// return the size of the basis set if there is any
virtual int getBasisSetSize() const { return 0; }
/// check a few key parameters before putting the SPO into a determinant
virtual void checkObject() const {}
#endif
/// create optimizable orbital rotation parameters
virtual void buildOptVariables(const std::vector<std::pair<int, int>>& rotations) {}
/// reset parameters to the values from optimizer
virtual void resetParameters(const opt_variables_type& optVariables) = 0;
/// check in/out parameters to the global list of parameters used by the optimizer
virtual void checkInVariables(opt_variables_type& active) {}
virtual void checkOutVariables(const opt_variables_type& active) {}
/** Evaluate the derivative of the optimized orbitals with respect to the parameters
* this is used only for MSD, to be refined for better serving both single and multi SD
*/
virtual void evaluateDerivatives(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi,
std::vector<ValueType>& dhpsioverpsi,
const ValueType& psiCurrent,
const std::vector<ValueType>& Coeff,
const std::vector<size_t>& C2node_up,
const std::vector<size_t>& C2node_dn,
const ValueVector_t& detValues_up,
const ValueVector_t& detValues_dn,
const GradMatrix_t& grads_up,
const GradMatrix_t& grads_dn,
const ValueMatrix_t& lapls_up,
const ValueMatrix_t& lapls_dn,
const ValueMatrix_t& M_up,
const ValueMatrix_t& M_dn,
const ValueMatrix_t& Minv_up,
const ValueMatrix_t& Minv_dn,
const GradMatrix_t& B_grad,
const ValueMatrix_t& B_lapl,
const std::vector<int>& detData_up,
const size_t N1,
const size_t N2,
const size_t NP1,
const size_t NP2,
const std::vector<std::vector<int>>& lookup_tbl)
{}
/** reset the target particleset
* this is used to reset the pointer to ion-electron distance table needed by LCAO basis set.
* Ye: Only AoS needs it, SoA LCAO doesn't need this. Reseting pointers is a state machine very hard to maintain.
* This interface should be removed with AOS.
*/
virtual void resetTargetParticleSet(ParticleSet& P) = 0;
/** set the OrbitalSetSize
* @param norbs number of single-particle orbitals
* Ye: I prefer to remove this interface in the future. SPOSet builders need to handle the size correctly.
* It doesn't make sense allowing to set the value at any place in the code.
*/
virtual void setOrbitalSetSize(int norbs) = 0;
/** Evaluate the SPO value at an explicit position.
* Ye: This is used only for debugging the CUDA code and should be removed.
*/
virtual void evaluate(const ParticleSet& P, PosType& r, ValueVector_t& psi);
/** evaluate the values of this single-particle orbital set
* @param P current ParticleSet
* @param iat active particle
* @param psi values of the SPO
*/
virtual void evaluate(const ParticleSet& P, int iat, ValueVector_t& psi) = 0;
/** evaluate the values of this single-particle orbital sets of multiple walkers
* @param spo_list the list of SPOSet pointers in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat active particle
* @param psi_v_list the list of value vector pointers in a walker batch
*/
virtual void mw_evaluateValue(const std::vector<SPOSet*>& spo_list,
const std::vector<ParticleSet*>& P_list,
int iat,
const std::vector<ValueVector_t*>& psi_v_list)
{
// Default implementation: walkers are independent, so each (SPOSet,
// ParticleSet, output) triple is evaluated in its own OpenMP iteration.
#pragma omp parallel for
for (int iw = 0; iw < spo_list.size(); iw++)
spo_list[iw]->evaluate(*P_list[iw], iat, *psi_v_list[iw]);
}
/** evaluate determinant ratios for virtual moves, e.g., sphere move for nonlocalPP
* @param VP virtual particle set
* @param psi values of the SPO, used as a scratch space if needed
* @param psiinv the row of inverse slater matrix corresponding to the particle moved virtually
* @param ratios return determinant ratios
*/
virtual void evaluateDetRatios(const VirtualParticleSet& VP,
ValueVector_t& psi,
const ValueVector_t& psiinv,
std::vector<ValueType>& ratios);
/** evaluate the values, gradients and laplacians of this single-particle orbital set
* @param P current ParticleSet
* @param iat active particle
* @param psi values of the SPO
* @param dpsi gradients of the SPO
* @param d2psi laplacians of the SPO
*/
virtual void evaluate(const ParticleSet& P,
int iat,
ValueVector_t& psi,
GradVector_t& dpsi,
ValueVector_t& d2psi) = 0;
/** evaluate the values, gradients and laplacians of this single-particle orbital sets of multiple walkers
* @param spo_list the list of SPOSet pointers in a walker batch
* @param P_list the list of ParticleSet pointers in a walker batch
* @param iat active particle
* @param psi_v_list the list of value vector pointers in a walker batch
* @param dpsi_v_list the list of gradient vector pointers in a walker batch
* @param d2psi_v_list the list of laplacian vector pointers in a walker batch
*/
virtual void mw_evaluateVGL(const std::vector<SPOSet*>& spo_list,
const std::vector<ParticleSet*>& P_list,
int iat,
const std::vector<ValueVector_t*>& psi_v_list,
const std::vector<GradVector_t*>& dpsi_v_list,
const std::vector<ValueVector_t*>& d2psi_v_list)
{
// Default implementation: one independent single-walker evaluation per thread.
#pragma omp parallel for
for (int iw = 0; iw < spo_list.size(); iw++)
spo_list[iw]->evaluate(*P_list[iw], iat, *psi_v_list[iw], *dpsi_v_list[iw], *d2psi_v_list[iw]);
}
/** evaluate the values, gradients and hessians of this single-particle orbital set
* @param P current ParticleSet
* @param iat active particle
* @param psi values of the SPO
* @param dpsi gradients of the SPO
* @param grad_grad_psi hessians of the SPO
*/
virtual void evaluate(const ParticleSet& P,
int iat,
ValueVector_t& psi,
GradVector_t& dpsi,
HessVector_t& grad_grad_psi);
/** evaluate the values, gradients, hessians, and grad hessians of this single-particle orbital set
* @param P current ParticleSet
* @param iat active particle
* @param psi values of the SPO
* @param dpsi gradients of the SPO
* @param grad_grad_psi hessians of the SPO
* @param grad_grad_grad_psi grad hessians of the SPO
*/
virtual void evaluate(const ParticleSet& P,
int iat,
ValueVector_t& psi,
GradVector_t& dpsi,
HessVector_t& grad_grad_psi,
GGGVector_t& grad_grad_grad_psi);
/** evaluate the third derivatives of this single-particle orbital set
* @param P current ParticleSet
* @param first first particle
* @param last last particle
* @param grad_grad_grad_logdet third derivatives of the SPO
*/
virtual void evaluateThirdDeriv(const ParticleSet& P, int first, int last, GGGMatrix_t& grad_grad_grad_logdet);
///////////////////////////////////////////////////////////////////////////////////////////////////
/// \brief returns whether this is an LCOrbitalSetOpt object
/// Ye: This should be removed as AoS. On the SoA side, LCAOrbitalSet replace LCOrbitalSet and LCOrbitalSetOpt
///////////////////////////////////////////////////////////////////////////////////////////////////
virtual bool is_of_type_LCOrbitalSetOpt() const { return false; }
/** evaluate the values, gradients and laplacians of this single-particle orbital for [first,last) particles
* @param P current ParticleSet
* @param first starting index of the particles
* @param last ending index of the particles
* @param logdet determinant matrix to be inverted
* @param dlogdet gradients
* @param d2logdet laplacians
*
*/
virtual void evaluate_notranspose(const ParticleSet& P,
int first,
int last,
ValueMatrix_t& logdet,
GradMatrix_t& dlogdet,
ValueMatrix_t& d2logdet) = 0;
/** evaluate the values, gradients and hessians of this single-particle orbital for [first,last) particles
* @param P current ParticleSet
* @param first starting index of the particles
* @param last ending index of the particles
* @param logdet determinant matrix to be inverted
* @param dlogdet gradients
* @param grad_grad_logdet hessians
*
*/
virtual void evaluate_notranspose(const ParticleSet& P,
int first,
int last,
ValueMatrix_t& logdet,
GradMatrix_t& dlogdet,
HessMatrix_t& grad_grad_logdet);
/** evaluate the values, gradients, hessians and third derivatives of this single-particle orbital for [first,last) particles
* @param P current ParticleSet
* @param first starting index of the particles
* @param last ending index of the particles
* @param logdet determinant matrix to be inverted
* @param dlogdet gradients
* @param grad_grad_logdet hessians
* @param grad_grad_grad_logdet third derivatives
*
*/
virtual void evaluate_notranspose(const ParticleSet& P,
int first,
int last,
ValueMatrix_t& logdet,
GradMatrix_t& dlogdet,
HessMatrix_t& grad_grad_logdet,
GGGMatrix_t& grad_grad_grad_logdet);
/** evaluate the gradients of this single-particle orbital
* for [first,last) target particles with respect to the given source particle
* @param P current ParticleSet
* @param first starting index of the particles
* @param last ending index of the particles
* @param iat_src source particle index
* @param gradphi gradients
*
*/
virtual void evaluateGradSource(const ParticleSet& P,
int first,
int last,
const ParticleSet& source,
int iat_src,
GradMatrix_t& gradphi);
/** evaluate the gradients of values, gradients, laplacians of this single-particle orbital
* for [first,last) target particles with respect to the given source particle
* @param P current ParticleSet
* @param first starting index of the particles
* @param last ending index of the particles
* @param iat_src source particle index
* @param gradphi gradients of values
* @param grad_grad_phi gradients of gradients
* @param grad_lapl_phi gradients of laplacians
*
*/
virtual void evaluateGradSource(const ParticleSet& P,
int first,
int last,
const ParticleSet& source,
int iat_src,
GradMatrix_t& grad_phi,
HessMatrix_t& grad_grad_phi,
GradMatrix_t& grad_lapl_phi);
/** access the k point related to the given orbital */
virtual PosType get_k(int orb) { return PosType(); }
/** make a clone of itself
* every derived class must implement this to have threading working correctly.
*/
virtual SPOSet* makeClone() const;
/** Used only by cusp correction in AOS LCAO.
* Ye: the SoA LCAO moves all this responsibility to the builder.
* This interface should be removed with AoS.
*/
virtual bool transformSPOSet() { return true; }
/** finalize the construction of SPOSet
*
* for example, classes serving accelerators may need to transfer data from host to device
* after the host side objects are built.
*/
virtual void finalizeConstruction() {}
// Routine to set up data for the LCOrbitalSetOpt child class specifically
// Should be left empty for other derived classes
// Ye: This interface should be removed with AoS.
virtual void init_LCOrbitalSetOpt(const double mix_factor = 0.0){};
// Routine to update internal data for the LCOrbitalSetOpt child class specifically
// Should be left empty for other derived classes
// Ye: This interface should be removed with AoS.
virtual void rotate_B(const std::vector<RealType>& rot_mat){};
#ifdef QMC_CUDA
using CTS = CUDAGlobalTypes;
//////////////////////////////////////////
// Walker-parallel vectorized functions //
//////////////////////////////////////////
virtual void reserve(PointerPool<gpu::device_vector<CTS::ValueType>>& pool) {}
virtual void evaluate(std::vector<Walker_t*>& walkers, int iat, gpu::device_vector<CTS::ValueType*>& phi);
virtual void evaluate(std::vector<Walker_t*>& walkers,
std::vector<PosType>& new_pos,
gpu::device_vector<CTS::ValueType*>& phi);
virtual void evaluate(std::vector<Walker_t*>& walkers,
std::vector<PosType>& new_pos,
gpu::device_vector<CTS::ValueType*>& phi,
gpu::device_vector<CTS::ValueType*>& grad_lapl_list,
int row_stride);
virtual void evaluate(std::vector<Walker_t*>& walkers,
std::vector<PosType>& new_pos,
gpu::device_vector<CTS::ValueType*>& phi,
gpu::device_vector<CTS::ValueType*>& grad_lapl_list,
int row_stride,
int k,
bool klinear);
virtual void evaluate(std::vector<PosType>& pos, gpu::device_vector<CTS::RealType*>& phi);
virtual void evaluate(std::vector<PosType>& pos, gpu::device_vector<CTS::ComplexType*>& phi);
#endif
#if !defined(ENABLE_SOA)
protected:
bool putOccupation(xmlNodePtr occ_ptr);
bool putFromXML(xmlNodePtr coeff_ptr);
bool putFromH5(const std::string& fname, xmlNodePtr coeff_ptr);
#endif
protected:
///true, if the derived class has non-zero ionic derivatives.
const bool ionDerivs;
///true if SPO is optimizable
const bool Optimizable;
///number of Single-particle orbitals
IndexType OrbitalSetSize;
/// Optimizable variables
opt_variables_type myVars;
///name of the class
std::string className;
};
///convenience alias for a non-owning SPOSet pointer
typedef SPOSet* SPOSetPtr;
} // namespace qmcplusplus
#endif
|
_phono3py.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <Python.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <numpy/arrayobject.h>
#include <lapack_wrapper.h>
#include <phonon.h>
#include <phonoc_array.h>
#include <phonoc_const.h>
#include <phonon3_h/fc3.h>
#include <phonon3_h/frequency_shift.h>
#include <phonon3_h/interaction.h>
#include <phonon3_h/imag_self_energy_with_g.h>
#include <phonon3_h/pp_collision.h>
#include <phonon3_h/collision_matrix.h>
#include <other_h/isotope.h>
#include <triplet_h/triplet.h>
#include <tetrahedron_method.h>
/* #define LIBFLAME */
#ifdef LIBFLAME
#include <flame_wrapper.h>
#endif
/* Forward declarations of the Python-facing wrapper functions defined below. */
static PyObject * py_get_phonons_at_gridpoints(PyObject *self, PyObject *args);
static PyObject * py_get_interaction(PyObject *self, PyObject *args);
static PyObject * py_get_pp_collision(PyObject *self, PyObject *args);
static PyObject *
py_get_pp_collision_with_sigma(PyObject *self, PyObject *args);
static PyObject *
py_get_imag_self_energy_with_g(PyObject *self, PyObject *args);
static PyObject *
py_get_detailed_imag_self_energy_with_g(PyObject *self, PyObject *args);
static PyObject * py_get_frequency_shift_at_bands(PyObject *self,
PyObject *args);
static PyObject * py_get_collision_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_reducible_collision_matrix(PyObject *self,
PyObject *args);
static PyObject * py_symmetrize_collision_matrix(PyObject *self,
PyObject *args);
static PyObject * py_distribute_fc3(PyObject *self, PyObject *args);
static PyObject * py_get_isotope_strength(PyObject *self, PyObject *args);
static PyObject * py_get_thm_isotope_strength(PyObject *self, PyObject *args);
static PyObject *
py_set_permutation_symmetry_fc3(PyObject *self, PyObject *args);
static PyObject *
py_set_permutation_symmetry_compact_fc3(PyObject *self, PyObject *args);
/* Fix: a second, redundant declaration of py_set_permutation_symmetry_fc3
   (identical to the one above) was removed here. */
static PyObject * py_transpose_compact_fc3(PyObject *self, PyObject *args);
/* NOTE(review): "gird" is a typo for "grid", but the identifier must keep
   matching its definition elsewhere in this file, so it is left unchanged. */
static PyObject * py_get_neighboring_gird_points(PyObject *self, PyObject *args);
static PyObject * py_set_integration_weights(PyObject *self, PyObject *args);
static PyObject *
py_tpl_get_triplets_reciprocal_mesh_at_q(PyObject *self, PyObject *args);
static PyObject * py_tpl_get_BZ_triplets_at_q(PyObject *self, PyObject *args);
static PyObject *
py_set_triplets_integration_weights(PyObject *self, PyObject *args);
static PyObject *
py_set_triplets_integration_weights_with_sigma(PyObject *self, PyObject *args);
static PyObject *
py_diagonalize_collision_matrix(PyObject *self, PyObject *args);
static PyObject * py_pinv_from_eigensolution(PyObject *self, PyObject *args);
static PyObject * py_get_default_colmat_solver(PyObject *self, PyObject *args);
#ifdef LIBFLAME
static PyObject * py_inverse_collision_matrix_libflame(PyObject *self, PyObject *args);
#endif
/* Internal (non-Python) helpers. */
static void pinv_from_eigensolution(double *data,
const double *eigvals,
const int size,
const double cutoff,
const int pinv_method);
static void show_colmat_info(const PyArrayObject *collision_matrix_py,
const int i_sigma,
const int i_temp,
const long adrs_shift);
/* Per-module state: holds the module's exception object. */
struct module_state {
PyObject *error;
};
#if PY_MAJOR_VERSION >= 3
/* Python 3: state is stored inside the module object itself. */
#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
#else
/* Python 2: no per-module storage, fall back to a single static instance. */
#define GETSTATE(m) (&_state)
static struct module_state _state;
#endif
/* Exposed as "error_out": always raises this module's error object with a
 * fixed message and returns NULL to signal the exception to the caller. */
static PyObject *
error_out(PyObject *m) {
    struct module_state *state = GETSTATE(m);
    PyErr_SetString(state->error, "something bad happened");
    return NULL;
}
static PyMethodDef _phono3py_methods[] = {
{"error_out", (PyCFunction)error_out, METH_NOARGS, NULL},
{"phonons_at_gridpoints",
py_get_phonons_at_gridpoints,
METH_VARARGS,
"Set phonons at grid points"},
{"interaction",
(PyCFunction)py_get_interaction,
METH_VARARGS,
"Interaction of triplets"},
{"pp_collision",
(PyCFunction)py_get_pp_collision,
METH_VARARGS,
"Collision and ph-ph calculation"},
{"pp_collision_with_sigma",
(PyCFunction)py_get_pp_collision_with_sigma,
METH_VARARGS,
"Collision and ph-ph calculation for smearing method"},
{"imag_self_energy_with_g",
(PyCFunction)py_get_imag_self_energy_with_g,
METH_VARARGS,
"Imaginary part of self energy at frequency points with g"},
{"detailed_imag_self_energy_with_g",
(PyCFunction)py_get_detailed_imag_self_energy_with_g,
METH_VARARGS,
"Detailed contribution to imaginary part of self energy at frequency points with g"},
{"frequency_shift_at_bands",
(PyCFunction)py_get_frequency_shift_at_bands,
METH_VARARGS,
"Phonon frequency shift from third order force constants"},
{"collision_matrix",
(PyCFunction)py_get_collision_matrix,
METH_VARARGS,
"Collision matrix with g"},
{"reducible_collision_matrix",
(PyCFunction)py_get_reducible_collision_matrix,
METH_VARARGS,
"Collision matrix with g for reducible grid points"},
{"symmetrize_collision_matrix",
(PyCFunction)py_symmetrize_collision_matrix,
METH_VARARGS,
"Symmetrize collision matrix"},
{"distribute_fc3",
(PyCFunction)py_distribute_fc3,
METH_VARARGS,
"Distribute least fc3 to full fc3"},
{"isotope_strength",
(PyCFunction)py_get_isotope_strength,
METH_VARARGS,
"Isotope scattering strength"},
{"thm_isotope_strength",
(PyCFunction)py_get_thm_isotope_strength,
METH_VARARGS,
"Isotope scattering strength for tetrahedron_method"},
{"permutation_symmetry_fc3",
(PyCFunction)py_set_permutation_symmetry_fc3,
METH_VARARGS,
"Set permutation symmetry for fc3"},
{"permutation_symmetry_compact_fc3",
(PyCFunction)py_set_permutation_symmetry_compact_fc3,
METH_VARARGS,
"Set permutation symmetry for compact-fc3"},
{"transpose_compact_fc3",
(PyCFunction)py_transpose_compact_fc3,
METH_VARARGS,
"Transpose compact fc3"},
{"neighboring_grid_points",
(PyCFunction)py_get_neighboring_gird_points,
METH_VARARGS,
"Neighboring grid points by relative grid addresses"},
{"integration_weights",
(PyCFunction)py_set_integration_weights,
METH_VARARGS,
"Integration weights of tetrahedron method"},
{"triplets_reciprocal_mesh_at_q",
(PyCFunction)py_tpl_get_triplets_reciprocal_mesh_at_q,
METH_VARARGS,
"Triplets on reciprocal mesh points at a specific q-point"},
{"BZ_triplets_at_q",
(PyCFunction)py_tpl_get_BZ_triplets_at_q,
METH_VARARGS,
"Triplets in reciprocal primitive lattice are transformed to those in BZ."},
{"triplets_integration_weights",
(PyCFunction)py_set_triplets_integration_weights,
METH_VARARGS,
"Integration weights of tetrahedron method for triplets"},
{"triplets_integration_weights_with_sigma",
(PyCFunction)py_set_triplets_integration_weights_with_sigma,
METH_VARARGS,
"Integration weights of smearing method for triplets"},
{"diagonalize_collision_matrix",
(PyCFunction)py_diagonalize_collision_matrix,
METH_VARARGS,
"Diagonalize and optionally pseudo-inverse using Lapack dsyev(d)"},
{"pinv_from_eigensolution",
(PyCFunction)py_pinv_from_eigensolution,
METH_VARARGS,
"Pseudo-inverse from eigensolution"},
{"default_colmat_solver",
(PyCFunction)py_get_default_colmat_solver,
METH_VARARGS,
"Return default collison matrix solver by integer value"},
#ifdef LIBFLAME
{"inverse_collision_matrix_libflame",
(PyCFunction)py_inverse_collision_matrix_libflame,
METH_VARARGS,
"Pseudo-inverse using libflame hevd"},
#endif
{NULL, NULL, 0, NULL}
};
#if PY_MAJOR_VERSION >= 3
/* GC traversal hook: visit the exception object held in module state. */
static int _phono3py_traverse(PyObject *m, visitproc visit, void *arg) {
Py_VISIT(GETSTATE(m)->error);
return 0;
}
/* GC clear hook: release the exception object held in module state. */
static int _phono3py_clear(PyObject *m) {
Py_CLEAR(GETSTATE(m)->error);
return 0;
}
/* Python 3 module definition: name "_phono3py", per-module state sized
 * for struct module_state, with GC traverse/clear for the state. */
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_phono3py",
NULL,
sizeof(struct module_state),
_phono3py_methods,
NULL,
_phono3py_traverse,
_phono3py_clear,
NULL
};
#define INITERROR return NULL
/* Module entry point.  Python 3 builds export PyInit__phono3py and
 * return the module object (or NULL on failure); Python 2 builds
 * export init_phono3py and return void.  INITERROR abstracts the
 * difference in failure returns. */
PyObject *
PyInit__phono3py(void)
#else
#define INITERROR return
void
init_phono3py(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module = PyModule_Create(&moduledef);
#else
PyObject *module = Py_InitModule("_phono3py", _phono3py_methods);
#endif
struct module_state *st;
if (module == NULL)
INITERROR;
st = GETSTATE(module);
/* Create the module-specific exception type stored in module state. */
st->error = PyErr_NewException("_phono3py.Error", NULL, NULL);
if (st->error == NULL) {
Py_DECREF(module);
INITERROR;
}
#if PY_MAJOR_VERSION >= 3
return module;
#endif
}
/* Python wrapper: compute phonon frequencies/eigenvectors at grid points.
 * Fills py_frequencies / py_eigenvectors in place and marks entries in
 * py_phonon_done.  Dispatches to the Gonze non-analytical-correction
 * variant when dd_q0 is given (not None), otherwise to the plain solver.
 * Returns None, or NULL on argument-parsing failure.
 * NOTE: the PyArg_ParseTuple format "OOOOOOOOOOOOOdOOOOdOOds" must stay
 * in exact one-to-one order with the address list below. */
static PyObject * py_get_phonons_at_gridpoints(PyObject *self, PyObject *args)
{
PyArrayObject* py_frequencies;
PyArrayObject* py_eigenvectors;
PyArrayObject* py_phonon_done;
PyArrayObject* py_grid_points;
PyArrayObject* py_grid_address;
PyArrayObject* py_mesh;
PyArrayObject* py_shortest_vectors_fc2;
PyArrayObject* py_multiplicity_fc2;
PyArrayObject* py_positions_fc2;
PyArrayObject* py_fc2;
PyArrayObject* py_masses_fc2;
PyArrayObject* py_p2s_map_fc2;
PyArrayObject* py_s2p_map_fc2;
PyArrayObject* py_reciprocal_lattice;
PyArrayObject* py_born_effective_charge;
PyArrayObject* py_q_direction;
PyArrayObject* py_dielectric_constant;
PyArrayObject* py_dd_q0;
PyArrayObject* py_G_list;
double nac_factor;
double unit_conversion_factor;
double lambda;
char* uplo;
double (*born)[3][3];
double (*dielectric)[3];
double *q_dir;
double* freqs;
lapack_complex_double* eigvecs;
char* phonon_done;
int* grid_points;
int (*grid_address)[3];
int* mesh;
double* fc2;
double(*svecs_fc2)[27][3];
int* multi_fc2;
double (*positions_fc2)[3];
double* masses_fc2;
int* p2s_fc2;
int* s2p_fc2;
double (*rec_lat)[3];
double * dd_q0;
double (*G_list)[3];
int num_patom;
int num_satom;
int num_phonons;
int num_grid_points;
int num_G_points;
if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOOdOOOOdOOds",
&py_frequencies,
&py_eigenvectors,
&py_phonon_done,
&py_grid_points,
&py_grid_address,
&py_mesh,
&py_fc2,
&py_shortest_vectors_fc2,
&py_multiplicity_fc2,
&py_positions_fc2,
&py_masses_fc2,
&py_p2s_map_fc2,
&py_s2p_map_fc2,
&unit_conversion_factor,
&py_born_effective_charge,
&py_dielectric_constant,
&py_reciprocal_lattice,
&py_q_direction,
&nac_factor,
&py_dd_q0,
&py_G_list,
&lambda,
&uplo)) {
return NULL;
}
/* Borrow raw data pointers from the NumPy arrays (no copies). */
freqs = (double*)PyArray_DATA(py_frequencies);
eigvecs = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
phonon_done = (char*)PyArray_DATA(py_phonon_done);
grid_points = (int*)PyArray_DATA(py_grid_points);
grid_address = (int(*)[3])PyArray_DATA(py_grid_address);
mesh = (int*)PyArray_DATA(py_mesh);
fc2 = (double*)PyArray_DATA(py_fc2);
svecs_fc2 = (double(*)[27][3])PyArray_DATA(py_shortest_vectors_fc2);
multi_fc2 = (int*)PyArray_DATA(py_multiplicity_fc2);
masses_fc2 = (double*)PyArray_DATA(py_masses_fc2);
p2s_fc2 = (int*)PyArray_DATA(py_p2s_map_fc2);
s2p_fc2 = (int*)PyArray_DATA(py_s2p_map_fc2);
rec_lat = (double(*)[3])PyArray_DATA(py_reciprocal_lattice);
/* Counts derived from array shapes: multiplicity is (satom, patom). */
num_patom = PyArray_DIMS(py_multiplicity_fc2)[1];
num_satom = PyArray_DIMS(py_multiplicity_fc2)[0];
num_phonons = PyArray_DIMS(py_frequencies)[0];
num_grid_points = PyArray_DIMS(py_grid_points)[0];
/* Optional (None-able) arguments map to NULL pointers. */
if ((PyObject*)py_born_effective_charge == Py_None) {
born = NULL;
} else {
born = (double(*)[3][3])PyArray_DATA(py_born_effective_charge);
}
if ((PyObject*)py_dielectric_constant == Py_None) {
dielectric = NULL;
} else {
dielectric = (double(*)[3])PyArray_DATA(py_dielectric_constant);
}
if ((PyObject*)py_q_direction == Py_None) {
q_dir = NULL;
} else {
q_dir = (double*)PyArray_DATA(py_q_direction);
/* A numerically-zero q direction is treated as "no direction". */
if (fabs(q_dir[0]) < 1e-10 &&
fabs(q_dir[1]) < 1e-10 &&
fabs(q_dir[2]) < 1e-10) {
q_dir = NULL;
}
}
if ((PyObject*)py_dd_q0 == Py_None) {
dd_q0 = NULL;
} else {
dd_q0 = (double*)PyArray_DATA(py_dd_q0);
}
if ((PyObject*)py_G_list == Py_None) {
G_list = NULL;
num_G_points = 0;
} else {
G_list = (double(*)[3])PyArray_DATA(py_G_list);
num_G_points = PyArray_DIMS(py_G_list)[0];
}
if ((PyObject*)py_positions_fc2 == Py_None) {
positions_fc2 = NULL;
} else {
positions_fc2 = (double(*)[3])PyArray_DATA(py_positions_fc2);
}
/* dd_q0 present selects the Gonze-style NAC phonon solver. */
if (!dd_q0) {
phn_get_phonons_at_gridpoints(freqs,
eigvecs,
phonon_done,
num_phonons,
grid_points,
num_grid_points,
grid_address,
mesh,
fc2,
svecs_fc2,
multi_fc2,
num_patom,
num_satom,
masses_fc2,
p2s_fc2,
s2p_fc2,
unit_conversion_factor,
born,
dielectric,
rec_lat,
q_dir,
nac_factor,
uplo[0]);
} else {
phn_get_gonze_phonons_at_gridpoints(freqs,
eigvecs,
phonon_done,
num_phonons,
grid_points,
num_grid_points,
grid_address,
mesh,
fc2,
svecs_fc2,
multi_fc2,
positions_fc2,
num_patom,
num_satom,
masses_fc2,
p2s_fc2,
s2p_fc2,
unit_conversion_factor,
born,
dielectric,
rec_lat,
q_dir,
nac_factor,
dd_q0,
G_list,
num_G_points,
lambda,
uplo[0]);
}
Py_RETURN_NONE;
}
/* Python wrapper: compute |Phi3|^2 interaction strengths for triplets.
 * Results are written into py_fc3_normal_squared in place.
 * Returns None, or NULL on argument-parsing failure. */
static PyObject * py_get_interaction(PyObject *self, PyObject *args)
{
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_g_zero;
PyArrayObject *py_frequencies;
PyArrayObject *py_eigenvectors;
PyArrayObject *py_grid_point_triplets;
PyArrayObject *py_grid_address;
PyArrayObject *py_mesh;
PyArrayObject *py_shortest_vectors;
PyArrayObject *py_multiplicities;
PyArrayObject *py_fc3;
PyArrayObject *py_masses;
PyArrayObject *py_p2s_map;
PyArrayObject *py_s2p_map;
PyArrayObject *py_band_indices;
double cutoff_frequency;
int symmetrize_fc3_q;
Darray *fc3_normal_squared;
Darray *freqs;
lapack_complex_double *eigvecs;
Iarray *triplets;
char* g_zero;
int *grid_address;
int *mesh;
double *fc3;
double *svecs;
int *multi;
double *masses;
int *p2s;
int *s2p;
int *band_indices;
int svecs_dims[3];
int i;
int is_compact_fc3;
if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOOOid",
&py_fc3_normal_squared,
&py_g_zero,
&py_frequencies,
&py_eigenvectors,
&py_grid_point_triplets,
&py_grid_address,
&py_mesh,
&py_fc3,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_p2s_map,
&py_s2p_map,
&py_band_indices,
&symmetrize_fc3_q,
&cutoff_frequency)) {
return NULL;
}
/* convert_to_darray/iarray allocate wrapper structs that must be freed
 * before returning (the underlying NumPy buffers are only borrowed). */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
freqs = convert_to_darray(py_frequencies);
/* npy_cdouble and lapack_complex_double may not be compatible. */
/* So eigenvectors should not be used in Python side */
eigvecs = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
triplets = convert_to_iarray(py_grid_point_triplets);
g_zero = (char*)PyArray_DATA(py_g_zero);
grid_address = (int*)PyArray_DATA(py_grid_address);
mesh = (int*)PyArray_DATA(py_mesh);
fc3 = (double*)PyArray_DATA(py_fc3);
/* Square first two axes => full fc3; otherwise the compact format. */
if (PyArray_DIMS(py_fc3)[0] == PyArray_DIMS(py_fc3)[1]) {
is_compact_fc3 = 0;
} else {
is_compact_fc3 = 1;
}
svecs = (double*)PyArray_DATA(py_shortest_vectors);
for (i = 0; i < 3; i++) {
svecs_dims[i] = PyArray_DIMS(py_shortest_vectors)[i];
}
multi = (int*)PyArray_DATA(py_multiplicities);
masses = (double*)PyArray_DATA(py_masses);
p2s = (int*)PyArray_DATA(py_p2s_map);
s2p = (int*)PyArray_DATA(py_s2p_map);
band_indices = (int*)PyArray_DATA(py_band_indices);
get_interaction(fc3_normal_squared,
g_zero,
freqs,
eigvecs,
triplets,
grid_address,
mesh,
fc3,
is_compact_fc3,
svecs,
svecs_dims,
multi,
masses,
p2s,
s2p,
band_indices,
symmetrize_fc3_q,
cutoff_frequency);
free(fc3_normal_squared);
free(freqs);
free(triplets);
Py_RETURN_NONE;
}
/* Python wrapper: ph-ph collision (gamma) with tetrahedron-method
 * integration weights computed internally.  Writes into py_gamma.
 * Returns None, or NULL on argument-parsing failure. */
static PyObject * py_get_pp_collision(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma;
PyArrayObject *py_relative_grid_address;
PyArrayObject *py_frequencies;
PyArrayObject *py_eigenvectors;
PyArrayObject *py_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_grid_address;
PyArrayObject *py_bz_map;
PyArrayObject *py_mesh;
PyArrayObject *py_fc3;
PyArrayObject *py_shortest_vectors;
PyArrayObject *py_multiplicities;
PyArrayObject *py_masses;
PyArrayObject *py_p2s_map;
PyArrayObject *py_s2p_map;
PyArrayObject *py_band_indices;
PyArrayObject *py_temperatures;
double cutoff_frequency;
int is_NU;
int symmetrize_fc3_q;
double *gamma;
int (*relative_grid_address)[4][3];
double *frequencies;
lapack_complex_double *eigenvectors;
Iarray *triplets;
int *triplet_weights;
int *grid_address;
int *bz_map;
int *mesh;
double *fc3;
double *svecs;
int *multi;
double *masses;
int *p2s;
int *s2p;
Iarray *band_indices;
Darray *temperatures;
int svecs_dims[3];
int i;
int is_compact_fc3;
if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOOOOOOiid",
&py_gamma,
&py_relative_grid_address,
&py_frequencies,
&py_eigenvectors,
&py_triplets,
&py_triplet_weights,
&py_grid_address,
&py_bz_map,
&py_mesh,
&py_fc3,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_p2s_map,
&py_s2p_map,
&py_band_indices,
&py_temperatures,
&is_NU,
&symmetrize_fc3_q,
&cutoff_frequency)) {
return NULL;
}
gamma = (double*)PyArray_DATA(py_gamma);
relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
frequencies = (double*)PyArray_DATA(py_frequencies);
eigenvectors = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
triplets = convert_to_iarray(py_triplets);
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
grid_address = (int*)PyArray_DATA(py_grid_address);
bz_map = (int*)PyArray_DATA(py_bz_map);
mesh = (int*)PyArray_DATA(py_mesh);
fc3 = (double*)PyArray_DATA(py_fc3);
/* Square first two axes => full fc3; otherwise the compact format. */
if (PyArray_DIMS(py_fc3)[0] == PyArray_DIMS(py_fc3)[1]) {
is_compact_fc3 = 0;
} else {
is_compact_fc3 = 1;
}
svecs = (double*)PyArray_DATA(py_shortest_vectors);
for (i = 0; i < 3; i++) {
svecs_dims[i] = PyArray_DIMS(py_shortest_vectors)[i];
}
multi = (int*)PyArray_DATA(py_multiplicities);
masses = (double*)PyArray_DATA(py_masses);
p2s = (int*)PyArray_DATA(py_p2s_map);
s2p = (int*)PyArray_DATA(py_s2p_map);
band_indices = convert_to_iarray(py_band_indices);
temperatures = convert_to_darray(py_temperatures);
ppc_get_pp_collision(gamma,
relative_grid_address,
frequencies,
eigenvectors,
triplets,
triplet_weights,
grid_address,
bz_map,
mesh,
fc3,
is_compact_fc3,
svecs,
svecs_dims,
multi,
masses,
p2s,
s2p,
band_indices,
temperatures,
is_NU,
symmetrize_fc3_q,
cutoff_frequency);
/* Release the wrapper structs allocated by convert_to_*array. */
free(triplets);
triplets = NULL;
free(band_indices);
band_indices = NULL;
free(temperatures);
temperatures = NULL;
Py_RETURN_NONE;
}
/* Python wrapper: ph-ph collision (gamma) with Gaussian smearing of
 * width sigma (optionally truncated at sigma_cutoff).  Writes into
 * py_gamma.  Returns None, or NULL on argument-parsing failure. */
static PyObject * py_get_pp_collision_with_sigma(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma;
PyArrayObject *py_frequencies;
PyArrayObject *py_eigenvectors;
PyArrayObject *py_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_grid_address;
PyArrayObject *py_mesh;
PyArrayObject *py_fc3;
PyArrayObject *py_shortest_vectors;
PyArrayObject *py_multiplicities;
PyArrayObject *py_masses;
PyArrayObject *py_p2s_map;
PyArrayObject *py_s2p_map;
PyArrayObject *py_band_indices;
PyArrayObject *py_temperatures;
int is_NU;
int symmetrize_fc3_q;
double sigma;
double sigma_cutoff;
double cutoff_frequency;
double *gamma;
double *frequencies;
lapack_complex_double *eigenvectors;
Iarray *triplets;
int *triplet_weights;
int *grid_address;
int *mesh;
double *fc3;
double *svecs;
int *multi;
double *masses;
int *p2s;
int *s2p;
Iarray *band_indices;
Darray *temperatures;
int svecs_dims[3];
int i;
int is_compact_fc3;
if (!PyArg_ParseTuple(args, "OddOOOOOOOOOOOOOOiid",
&py_gamma,
&sigma,
&sigma_cutoff,
&py_frequencies,
&py_eigenvectors,
&py_triplets,
&py_triplet_weights,
&py_grid_address,
&py_mesh,
&py_fc3,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_p2s_map,
&py_s2p_map,
&py_band_indices,
&py_temperatures,
&is_NU,
&symmetrize_fc3_q,
&cutoff_frequency)) {
return NULL;
}
gamma = (double*)PyArray_DATA(py_gamma);
frequencies = (double*)PyArray_DATA(py_frequencies);
eigenvectors = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
triplets = convert_to_iarray(py_triplets);
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
grid_address = (int*)PyArray_DATA(py_grid_address);
mesh = (int*)PyArray_DATA(py_mesh);
fc3 = (double*)PyArray_DATA(py_fc3);
/* Square first two axes => full fc3; otherwise the compact format. */
if (PyArray_DIMS(py_fc3)[0] == PyArray_DIMS(py_fc3)[1]) {
is_compact_fc3 = 0;
} else {
is_compact_fc3 = 1;
}
svecs = (double*)PyArray_DATA(py_shortest_vectors);
for (i = 0; i < 3; i++) {
svecs_dims[i] = PyArray_DIMS(py_shortest_vectors)[i];
}
multi = (int*)PyArray_DATA(py_multiplicities);
masses = (double*)PyArray_DATA(py_masses);
p2s = (int*)PyArray_DATA(py_p2s_map);
s2p = (int*)PyArray_DATA(py_s2p_map);
band_indices = convert_to_iarray(py_band_indices);
temperatures = convert_to_darray(py_temperatures);
ppc_get_pp_collision_with_sigma(gamma,
sigma,
sigma_cutoff,
frequencies,
eigenvectors,
triplets,
triplet_weights,
grid_address,
mesh,
fc3,
is_compact_fc3,
svecs,
svecs_dims,
multi,
masses,
p2s,
s2p,
band_indices,
temperatures,
is_NU,
symmetrize_fc3_q,
cutoff_frequency);
/* Release the wrapper structs allocated by convert_to_*array. */
free(triplets);
triplets = NULL;
free(band_indices);
band_indices = NULL;
free(temperatures);
temperatures = NULL;
Py_RETURN_NONE;
}
/* Python wrapper: imaginary part of the self energy at bands using
 * precomputed integration weights g (and the g_zero mask).  Writes
 * into py_gamma.  Returns None, or NULL on parse failure. */
static PyObject * py_get_imag_self_energy_with_g(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma;
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_frequencies;
PyArrayObject *py_grid_point_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_g;
PyArrayObject *py_g_zero;
double cutoff_frequency, temperature;
Darray *fc3_normal_squared;
double *gamma;
double *g;
char* g_zero;
double *frequencies;
int *grid_point_triplets;
int *triplet_weights;
if (!PyArg_ParseTuple(args, "OOOOOdOOd",
&py_gamma,
&py_fc3_normal_squared,
&py_grid_point_triplets,
&py_triplet_weights,
&py_frequencies,
&temperature,
&py_g,
&py_g_zero,
&cutoff_frequency)) {
return NULL;
}
/* Wrapper struct allocated here; freed before returning. */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
gamma = (double*)PyArray_DATA(py_gamma);
g = (double*)PyArray_DATA(py_g);
g_zero = (char*)PyArray_DATA(py_g_zero);
frequencies = (double*)PyArray_DATA(py_frequencies);
grid_point_triplets = (int*)PyArray_DATA(py_grid_point_triplets);
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
ise_get_imag_self_energy_at_bands_with_g(gamma,
fc3_normal_squared,
frequencies,
grid_point_triplets,
triplet_weights,
g,
g_zero,
temperature,
cutoff_frequency);
free(fc3_normal_squared);
fc3_normal_squared = NULL;
Py_RETURN_NONE;
}
/* Python wrapper: imaginary self energy resolved per triplet, with the
 * normal (N) / Umklapp (U) split.  Writes py_gamma_detail, py_gamma_N
 * and py_gamma_U in place.  Returns None, or NULL on parse failure. */
static PyObject *
py_get_detailed_imag_self_energy_with_g(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma_detail;
PyArrayObject *py_gamma_N;
PyArrayObject *py_gamma_U;
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_frequencies;
PyArrayObject *py_grid_point_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_grid_address;
PyArrayObject *py_g;
PyArrayObject *py_g_zero;
double cutoff_frequency, temperature;
Darray *fc3_normal_squared;
double *gamma_detail;
double *gamma_N;
double *gamma_U;
double *g;
char* g_zero;
double *frequencies;
int *grid_point_triplets;
int *triplet_weights;
int *grid_address;
if (!PyArg_ParseTuple(args, "OOOOOOOOdOOd",
&py_gamma_detail,
&py_gamma_N,
&py_gamma_U,
&py_fc3_normal_squared,
&py_grid_point_triplets,
&py_triplet_weights,
&py_grid_address,
&py_frequencies,
&temperature,
&py_g,
&py_g_zero,
&cutoff_frequency)) {
return NULL;
}
/* Wrapper struct allocated here; freed before returning. */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
gamma_detail = (double*)PyArray_DATA(py_gamma_detail);
gamma_N = (double*)PyArray_DATA(py_gamma_N);
gamma_U = (double*)PyArray_DATA(py_gamma_U);
g = (double*)PyArray_DATA(py_g);
g_zero = (char*)PyArray_DATA(py_g_zero);
frequencies = (double*)PyArray_DATA(py_frequencies);
grid_point_triplets = (int*)PyArray_DATA(py_grid_point_triplets);
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
grid_address = (int*)PyArray_DATA(py_grid_address);
ise_get_detailed_imag_self_energy_at_bands_with_g(gamma_detail,
gamma_N,
gamma_U,
fc3_normal_squared,
frequencies,
grid_point_triplets,
triplet_weights,
grid_address,
g,
g_zero,
temperature,
cutoff_frequency);
free(fc3_normal_squared);
Py_RETURN_NONE;
}
/* Python wrapper: real part of the self energy (phonon frequency shift)
 * at the requested band indices.  Writes into py_shift.
 * Returns None, or NULL on argument-parsing failure. */
static PyObject * py_get_frequency_shift_at_bands(PyObject *self,
PyObject *args)
{
PyArrayObject *py_shift;
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_frequencies;
PyArrayObject *py_grid_point_triplets;
PyArrayObject *py_triplet_weights;
PyArrayObject *py_band_indices;
double epsilon, unit_conversion_factor, cutoff_frequency, temperature;
Darray *fc3_normal_squared;
double *shift;
double *frequencies;
int *band_indices;
int *grid_point_triplets;
int *triplet_weights;
if (!PyArg_ParseTuple(args, "OOOOOOdddd",
&py_shift,
&py_fc3_normal_squared,
&py_grid_point_triplets,
&py_triplet_weights,
&py_frequencies,
&py_band_indices,
&temperature,
&epsilon,
&unit_conversion_factor,
&cutoff_frequency)) {
return NULL;
}
/* Wrapper struct allocated here; freed before returning. */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
shift = (double*)PyArray_DATA(py_shift);
frequencies = (double*)PyArray_DATA(py_frequencies);
band_indices = (int*)PyArray_DATA(py_band_indices);
grid_point_triplets = (int*)PyArray_DATA(py_grid_point_triplets);
triplet_weights = (int*)PyArray_DATA(py_triplet_weights);
get_frequency_shift_at_bands(shift,
fc3_normal_squared,
band_indices,
frequencies,
grid_point_triplets,
triplet_weights,
epsilon,
temperature,
unit_conversion_factor,
cutoff_frequency);
free(fc3_normal_squared);
Py_RETURN_NONE;
}
/* Python wrapper: build the (symmetry-reduced) collision matrix from
 * interaction strengths and integration weights g.  Writes into
 * py_collision_matrix.  Returns None, or NULL on parse failure. */
static PyObject * py_get_collision_matrix(PyObject *self, PyObject *args)
{
PyArrayObject *py_collision_matrix;
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_frequencies;
PyArrayObject *py_triplets;
PyArrayObject *py_triplets_map;
PyArrayObject *py_stabilized_gp_map;
PyArrayObject *py_g;
PyArrayObject *py_rotated_grid_points;
PyArrayObject *py_rotations_cartesian;
double temperature, unit_conversion_factor, cutoff_frequency;
Darray *fc3_normal_squared;
double *collision_matrix;
double *g;
double *frequencies;
int *triplets;
Iarray *triplets_map;
int *stabilized_gp_map;
Iarray *rotated_grid_points;
double *rotations_cartesian;
if (!PyArg_ParseTuple(args, "OOOOOOOOOddd",
&py_collision_matrix,
&py_fc3_normal_squared,
&py_frequencies,
&py_g,
&py_triplets,
&py_triplets_map,
&py_stabilized_gp_map,
&py_rotated_grid_points,
&py_rotations_cartesian,
&temperature,
&unit_conversion_factor,
&cutoff_frequency)) {
return NULL;
}
/* Wrapper structs allocated here; freed before returning. */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
g = (double*)PyArray_DATA(py_g);
frequencies = (double*)PyArray_DATA(py_frequencies);
triplets = (int*)PyArray_DATA(py_triplets);
triplets_map = convert_to_iarray(py_triplets_map);
stabilized_gp_map = (int*)PyArray_DATA(py_stabilized_gp_map);
rotated_grid_points = convert_to_iarray(py_rotated_grid_points);
rotations_cartesian = (double*)PyArray_DATA(py_rotations_cartesian);
col_get_collision_matrix(collision_matrix,
fc3_normal_squared,
frequencies,
triplets,
triplets_map,
stabilized_gp_map,
rotated_grid_points,
rotations_cartesian,
g,
temperature,
unit_conversion_factor,
cutoff_frequency);
free(fc3_normal_squared);
free(triplets_map);
free(rotated_grid_points);
Py_RETURN_NONE;
}
/* Python wrapper: collision matrix over the full (reducible) set of
 * grid points, i.e. without symmetry rotations.  Writes into
 * py_collision_matrix.  Returns None, or NULL on parse failure. */
static PyObject * py_get_reducible_collision_matrix(PyObject *self, PyObject *args)
{
PyArrayObject *py_collision_matrix;
PyArrayObject *py_fc3_normal_squared;
PyArrayObject *py_frequencies;
PyArrayObject *py_triplets;
PyArrayObject *py_triplets_map;
PyArrayObject *py_stabilized_gp_map;
PyArrayObject *py_g;
double temperature, unit_conversion_factor, cutoff_frequency;
Darray *fc3_normal_squared;
double *collision_matrix;
double *g;
double *frequencies;
int *triplets;
Iarray *triplets_map;
int *stabilized_gp_map;
if (!PyArg_ParseTuple(args, "OOOOOOOddd",
&py_collision_matrix,
&py_fc3_normal_squared,
&py_frequencies,
&py_g,
&py_triplets,
&py_triplets_map,
&py_stabilized_gp_map,
&temperature,
&unit_conversion_factor,
&cutoff_frequency)) {
return NULL;
}
/* Wrapper structs allocated here; freed before returning. */
fc3_normal_squared = convert_to_darray(py_fc3_normal_squared);
collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
g = (double*)PyArray_DATA(py_g);
frequencies = (double*)PyArray_DATA(py_frequencies);
triplets = (int*)PyArray_DATA(py_triplets);
triplets_map = convert_to_iarray(py_triplets_map);
stabilized_gp_map = (int*)PyArray_DATA(py_stabilized_gp_map);
col_get_reducible_collision_matrix(collision_matrix,
fc3_normal_squared,
frequencies,
triplets,
triplets_map,
stabilized_gp_map,
g,
temperature,
unit_conversion_factor,
cutoff_frequency);
free(fc3_normal_squared);
free(triplets_map);
Py_RETURN_NONE;
}
/* Symmetrize each (sigma, temperature) slice of the collision matrix
 * in place: M <- (M + M^T) / 2, parallelized over rows with OpenMP.
 *
 * Array layout: (sigma, temp, gp, band, [3,] gp, band, [3]); ndim == 8
 * means the Cartesian-component axes are present, so a matrix column
 * spans gp * band * 3 entries instead of gp * band.
 *
 * Fix: index arithmetic is promoted to long BEFORE multiplying.
 * Previously i * num_column * num_column * num_temp (and the inner
 * k * num_column + l) were evaluated in 32-bit int and overflowed for
 * realistic sizes (num_column^2 > INT_MAX), corrupting the addressing.
 *
 * Returns None, or NULL on argument-parsing failure. */
static PyObject * py_symmetrize_collision_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;

  double *collision_matrix;
  int num_sigma;
  int num_temp;
  int num_grid_points;
  int num_band;
  int i, j, k, l, num_column;
  long adrs_shift;
  double val;

  if (!PyArg_ParseTuple(args, "O",
                        &py_collision_matrix)) {
    return NULL;
  }

  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  num_sigma = PyArray_DIMS(py_collision_matrix)[0];
  num_temp = PyArray_DIMS(py_collision_matrix)[1];
  num_grid_points = PyArray_DIMS(py_collision_matrix)[2];
  num_band = PyArray_DIMS(py_collision_matrix)[3];

  if (PyArray_NDIM(py_collision_matrix) == 8) {
    num_column = num_grid_points * num_band * 3;
  } else {
    num_column = num_grid_points * num_band;
  }

  for (i = 0; i < num_sigma; i++) {
    for (j = 0; j < num_temp; j++) {
      /* Promote to long before multiplying: num_column^2 can exceed
         INT_MAX, so 32-bit products here would overflow (UB). */
      adrs_shift = ((long)i * num_column * num_column * num_temp +
                    (long)j * num_column * num_column);
      /* show_colmat_info(py_collision_matrix, i, j, adrs_shift); */
#pragma omp parallel for schedule(guided) private(l, val)
      for (k = 0; k < num_column; k++) {
        for (l = k + 1; l < num_column; l++) {
          /* (long)k * num_column keeps the row offset in long, too. */
          val = (collision_matrix[adrs_shift + (long)k * num_column + l] +
                 collision_matrix[adrs_shift + (long)l * num_column + k]) / 2;
          collision_matrix[adrs_shift + (long)k * num_column + l] = val;
          collision_matrix[adrs_shift + (long)l * num_column + k] = val;
        }
      }
    }
  }

  Py_RETURN_NONE;
}
/* Python wrapper: isotope scattering strength at one grid point using
 * Gaussian smearing (width sigma).  Writes into py_gamma.
 * The large commented-out section below is a kept-for-reference
 * tetrahedron-method variant of the same calculation.
 * Returns None, or NULL on argument-parsing failure. */
static PyObject * py_get_isotope_strength(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma;
PyArrayObject *py_frequencies;
PyArrayObject *py_eigenvectors;
PyArrayObject *py_band_indices;
PyArrayObject *py_mass_variances;
int grid_point;
int num_grid_points;
double cutoff_frequency;
double sigma;
double *gamma;
double *frequencies;
lapack_complex_double *eigenvectors;
int *band_indices;
double *mass_variances;
int num_band;
int num_band0;
if (!PyArg_ParseTuple(args, "OiOOOOidd",
&py_gamma,
&grid_point,
&py_mass_variances,
&py_frequencies,
&py_eigenvectors,
&py_band_indices,
&num_grid_points,
&sigma,
&cutoff_frequency)) {
return NULL;
}
gamma = (double*)PyArray_DATA(py_gamma);
frequencies = (double*)PyArray_DATA(py_frequencies);
eigenvectors = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
band_indices = (int*)PyArray_DATA(py_band_indices);
mass_variances = (double*)PyArray_DATA(py_mass_variances);
/* Band counts come from array shapes: frequencies is (gp, band). */
num_band = PyArray_DIMS(py_frequencies)[1];
num_band0 = PyArray_DIMS(py_band_indices)[0];
/* int i, j, k; */
/* double f, f0; */
/* int *weights, *ir_grid_points; */
/* double *integration_weights; */
/* ir_grid_points = (int*)malloc(sizeof(int) * num_grid_points); */
/* weights = (int*)malloc(sizeof(int) * num_grid_points); */
/* integration_weights = (double*)malloc(sizeof(double) * */
/* num_grid_points * num_band0 * num_band); */
/* for (i = 0; i < num_grid_points; i++) { */
/* ir_grid_points[i] = i; */
/* weights[i] = 1; */
/* for (j = 0; j < num_band0; j++) { */
/* f0 = frequencies[grid_point * num_band + band_indices[j]]; */
/* for (k = 0; k < num_band; k++) { */
/* f = frequencies[i * num_band + k]; */
/* integration_weights[i * num_band0 * num_band + */
/* j * num_band + k] = gaussian(f - f0, sigma); */
/* } */
/* } */
/* } */
/* get_thm_isotope_scattering_strength(gamma, */
/* grid_point, */
/* ir_grid_points, */
/* weights, */
/* mass_variances, */
/* frequencies, */
/* eigenvectors, */
/* num_grid_points, */
/* band_indices, */
/* num_band, */
/* num_band0, */
/* integration_weights, */
/* cutoff_frequency); */
/* free(ir_grid_points); */
/* free(weights); */
/* free(integration_weights); */
get_isotope_scattering_strength(gamma,
grid_point,
mass_variances,
frequencies,
eigenvectors,
num_grid_points,
band_indices,
num_band,
num_band0,
sigma,
cutoff_frequency);
Py_RETURN_NONE;
}
/* Python wrapper: isotope scattering strength at one grid point using
 * precomputed tetrahedron-method integration weights.  Writes into
 * py_gamma.  Returns None, or NULL on argument-parsing failure. */
static PyObject * py_get_thm_isotope_strength(PyObject *self, PyObject *args)
{
PyArrayObject *py_gamma;
PyArrayObject *py_frequencies;
PyArrayObject *py_eigenvectors;
PyArrayObject *py_band_indices;
PyArrayObject *py_mass_variances;
PyArrayObject *py_ir_grid_points;
PyArrayObject *py_weights;
PyArrayObject *py_integration_weights;
int grid_point;
double cutoff_frequency;
double *gamma;
double *frequencies;
int *ir_grid_points;
int *weights;
lapack_complex_double *eigenvectors;
int *band_indices;
double *mass_variances;
int num_band;
int num_band0;
double *integration_weights;
int num_ir_grid_points;
if (!PyArg_ParseTuple(args, "OiOOOOOOOd",
&py_gamma,
&grid_point,
&py_ir_grid_points,
&py_weights,
&py_mass_variances,
&py_frequencies,
&py_eigenvectors,
&py_band_indices,
&py_integration_weights,
&cutoff_frequency)) {
return NULL;
}
gamma = (double*)PyArray_DATA(py_gamma);
frequencies = (double*)PyArray_DATA(py_frequencies);
ir_grid_points = (int*)PyArray_DATA(py_ir_grid_points);
weights = (int*)PyArray_DATA(py_weights);
eigenvectors = (lapack_complex_double*)PyArray_DATA(py_eigenvectors);
band_indices = (int*)PyArray_DATA(py_band_indices);
mass_variances = (double*)PyArray_DATA(py_mass_variances);
/* Counts derived from array shapes: frequencies is (gp, band). */
num_band = PyArray_DIMS(py_frequencies)[1];
num_band0 = PyArray_DIMS(py_band_indices)[0];
integration_weights = (double*)PyArray_DATA(py_integration_weights);
num_ir_grid_points = PyArray_DIMS(py_ir_grid_points)[0];
get_thm_isotope_scattering_strength(gamma,
grid_point,
ir_grid_points,
weights,
mass_variances,
frequencies,
eigenvectors,
num_ir_grid_points,
band_indices,
num_band,
num_band0,
integration_weights,
cutoff_frequency);
Py_RETURN_NONE;
}
/* Copy the fc3 elements of atom `source` onto atom `target` using the
 * inverse Cartesian rotation and the atom index mapping, i.e.
 * distribute the symmetry-reduced fc3 over the full array in place.
 * Returns None, or NULL on argument-parsing failure. */
static PyObject * py_distribute_fc3(PyObject *self, PyObject *args)
{
  PyArrayObject *force_constants_third;
  PyArrayObject *rotation_cart_inv;
  PyArrayObject *atom_mapping_py;
  int target;
  int source;

  if (!PyArg_ParseTuple(args, "OiiOO",
                        &force_constants_third,
                        &target,
                        &source,
                        &atom_mapping_py,
                        &rotation_cart_inv)) {
    return NULL;
  }

  /* Raw buffers are borrowed from NumPy; num_atom is the length of the
     atom mapping array. */
  fc3_distribute_fc3((double*)PyArray_DATA(force_constants_third),
                     target,
                     source,
                     (int*)PyArray_DATA(atom_mapping_py),
                     (int)PyArray_DIMS(atom_mapping_py)[0],
                     (double*)PyArray_DATA(rotation_cart_inv));
  Py_RETURN_NONE;
}
/* Python wrapper: impose index-permutation symmetry on a full fc3
 * array in place.  num_atom is taken from the first axis length.
 * Returns None, or NULL on argument-parsing failure. */
static PyObject *
py_set_permutation_symmetry_fc3(PyObject *self, PyObject *args)
{
PyArrayObject *py_fc3;
double *fc3;
int num_atom;
if (!PyArg_ParseTuple(args, "O", &py_fc3)) {
return NULL;
}
fc3 = (double*)PyArray_DATA(py_fc3);
num_atom = PyArray_DIMS(py_fc3)[0];
fc3_set_permutation_symmetry_fc3(fc3, num_atom);
Py_RETURN_NONE;
}
/* Python wrapper: impose index-permutation symmetry on a compact fc3
 * array in place, using the permutation / s2pp / p2s / nsym_list
 * symmetry mappings.  fc3 shape is (n_patom, n_satom, ...).
 * Returns None, or NULL on argument-parsing failure. */
static PyObject *
py_set_permutation_symmetry_compact_fc3(PyObject *self, PyObject *args)
{
PyArrayObject* py_fc3;
PyArrayObject* py_permutations;
PyArrayObject* py_s2pp_map;
PyArrayObject* py_p2s_map;
PyArrayObject* py_nsym_list;
double *fc3;
int *s2pp;
int *p2s;
int *nsym_list;
int *perms;
int n_patom, n_satom;
if (!PyArg_ParseTuple(args, "OOOOO",
&py_fc3,
&py_permutations,
&py_s2pp_map,
&py_p2s_map,
&py_nsym_list)) {
return NULL;
}
fc3 = (double*)PyArray_DATA(py_fc3);
perms = (int*)PyArray_DATA(py_permutations);
s2pp = (int*)PyArray_DATA(py_s2pp_map);
p2s = (int*)PyArray_DATA(py_p2s_map);
nsym_list = (int*)PyArray_DATA(py_nsym_list);
n_patom = PyArray_DIMS(py_fc3)[0];
n_satom = PyArray_DIMS(py_fc3)[1];
fc3_set_permutation_symmetry_compact_fc3(fc3,
p2s,
s2pp,
nsym_list,
perms,
n_satom,
n_patom);
Py_RETURN_NONE;
}
/* Python wrapper: transpose a compact fc3 array in place; t_type
 * selects which pair of indices is exchanged (see
 * fc3_transpose_compact_fc3).  Returns None, or NULL on parse failure. */
static PyObject * py_transpose_compact_fc3(PyObject *self, PyObject *args)
{
PyArrayObject* py_fc3;
PyArrayObject* py_permutations;
PyArrayObject* py_s2pp_map;
PyArrayObject* py_p2s_map;
PyArrayObject* py_nsym_list;
int t_type;
double *fc3;
int *s2pp;
int *p2s;
int *nsym_list;
int *perms;
int n_patom, n_satom;
if (!PyArg_ParseTuple(args, "OOOOOi",
&py_fc3,
&py_permutations,
&py_s2pp_map,
&py_p2s_map,
&py_nsym_list,
&t_type)) {
return NULL;
}
fc3 = (double*)PyArray_DATA(py_fc3);
perms = (int*)PyArray_DATA(py_permutations);
s2pp = (int*)PyArray_DATA(py_s2pp_map);
p2s = (int*)PyArray_DATA(py_p2s_map);
nsym_list = (int*)PyArray_DATA(py_nsym_list);
n_patom = PyArray_DIMS(py_fc3)[0];
n_satom = PyArray_DIMS(py_fc3)[1];
fc3_transpose_compact_fc3(fc3,
p2s,
s2pp,
nsym_list,
perms,
n_satom,
n_patom,
t_type);
Py_RETURN_NONE;
}
/* Python wrapper: for every requested grid point, write the IDs of its
 * neighbours (one per relative grid address) into the output array.
 * NOTE: the "gird" typo in the function name is preserved on purpose --
 * this identifier is referenced from the module's method table. */
static PyObject * py_get_neighboring_gird_points(PyObject *self, PyObject *args)
{
  PyArrayObject *py_relative_grid_points, *py_grid_points;
  PyArrayObject *py_relative_grid_address, *py_mesh;
  PyArrayObject *py_bz_grid_address, *py_bz_map;
  int *relative_grid_points, *grid_points, *mesh, *bz_map;
  int (*relative_grid_address)[3];
  int (*bz_grid_address)[3];
  int num_grid_points, num_relative_grid_address;
  int i;

  if (!PyArg_ParseTuple(args, "OOOOOO",
                        &py_relative_grid_points,
                        &py_grid_points,
                        &py_relative_grid_address,
                        &py_mesh,
                        &py_bz_grid_address,
                        &py_bz_map)) {
    return NULL;
  }

  relative_grid_points = (int*)PyArray_DATA(py_relative_grid_points);
  grid_points = (int*)PyArray_DATA(py_grid_points);
  num_grid_points = (int)PyArray_DIMS(py_grid_points)[0];
  relative_grid_address = (int(*)[3])PyArray_DATA(py_relative_grid_address);
  num_relative_grid_address = (int)PyArray_DIMS(py_relative_grid_address)[0];
  mesh = (int*)PyArray_DATA(py_mesh);
  bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
  bz_map = (int*)PyArray_DATA(py_bz_map);

  /* Each grid point fills a disjoint output row, so the loop is
   * embarrassingly parallel. */
#pragma omp parallel for
  for (i = 0; i < num_grid_points; i++) {
    thm_get_neighboring_grid_points(
        relative_grid_points + i * num_relative_grid_address,
        grid_points[i],
        relative_grid_address,
        num_relative_grid_address,
        mesh,
        bz_grid_address,
        bz_map);
  }
  Py_RETURN_NONE;
}
/* Compute tetrahedron-method integration weights for a set of grid points.
 *
 * Output iw is filled with shape (num_gp, num_band0, num_band) laid out as
 * iw[i * num_band0 * num_band + j * num_band + bi].  For each grid point the
 * 24 surrounding tetrahedra (4 vertices each, given by
 * relative_grid_address of shape (24, 4, 3)) are located on the BZ grid,
 * band frequencies are gathered at the vertices, and the weight at every
 * frequency point is evaluated.  'I' presumably selects the plain
 * integration-weight (delta-function) mode of thm_get_integration_weight --
 * verify against that function's documentation. */
static PyObject * py_set_integration_weights(PyObject *self, PyObject *args)
{
  PyArrayObject *py_iw;
  PyArrayObject *py_frequency_points;
  PyArrayObject *py_relative_grid_address;
  PyArrayObject *py_mesh;
  PyArrayObject *py_grid_points;
  PyArrayObject *py_frequencies;
  PyArrayObject *py_bz_grid_address;
  PyArrayObject *py_bz_map;
  double *iw;
  double *frequency_points;
  int num_band0;
  int (*relative_grid_address)[4][3];
  int *mesh;
  int *grid_points;
  int num_gp;
  int (*bz_grid_address)[3];
  int *bz_map;
  double *frequencies;
  int num_band;
  int i, j, k, bi;
  /* Per-thread scratch: vertex grid-point IDs and their frequencies for the
   * 24 tetrahedra; must be private in the OpenMP loop below. */
  int vertices[24][4];
  double freq_vertices[24][4];
  if (!PyArg_ParseTuple(args, "OOOOOOOO",
                        &py_iw,
                        &py_frequency_points,
                        &py_relative_grid_address,
                        &py_mesh,
                        &py_grid_points,
                        &py_frequencies,
                        &py_bz_grid_address,
                        &py_bz_map)) {
    return NULL;
  }
  iw = (double*)PyArray_DATA(py_iw);
  frequency_points = (double*)PyArray_DATA(py_frequency_points);
  num_band0 = PyArray_DIMS(py_frequency_points)[0];
  relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
  mesh = (int*)PyArray_DATA(py_mesh);
  grid_points = (int*)PyArray_DATA(py_grid_points);
  num_gp = PyArray_DIMS(py_grid_points)[0];
  bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
  bz_map = (int*)PyArray_DATA(py_bz_map);
  frequencies = (double*)PyArray_DATA(py_frequencies);
  /* frequencies has shape (num_grid_points_total, num_band). */
  num_band = PyArray_DIMS(py_frequencies)[1];
  /* All loop counters and scratch arrays are per-iteration state and must be
   * privatized; each i writes a disjoint slice of iw. */
#pragma omp parallel for private(j, k, bi, vertices, freq_vertices)
  for (i = 0; i < num_gp; i++) {
    /* Resolve the 4 vertex grid points of each of the 24 tetrahedra. */
    for (j = 0; j < 24; j++) {
      thm_get_neighboring_grid_points(vertices[j],
                                      grid_points[i],
                                      relative_grid_address[j],
                                      4,
                                      mesh,
                                      bz_grid_address,
                                      bz_map);
    }
    for (bi = 0; bi < num_band; bi++) {
      /* Gather band-bi frequencies at every tetrahedron vertex. */
      for (j = 0; j < 24; j++) {
        for (k = 0; k < 4; k++) {
          freq_vertices[j][k] = frequencies[vertices[j][k] * num_band + bi];
        }
      }
      /* One weight per sampling frequency point. */
      for (j = 0; j < num_band0; j++) {
        iw[i * num_band0 * num_band + j * num_band + bi] =
          thm_get_integration_weight(frequency_points[j], freq_vertices, 'I');
      }
    }
  }
  Py_RETURN_NONE;
}
/* Python wrapper: find symmetry-irreducible q-point triplets containing the
 * fixed grid point.  Fills map_triplets and map_q in place and returns the
 * number of irreducible triplets as a Python int. */
static PyObject *
py_tpl_get_triplets_reciprocal_mesh_at_q(PyObject *self, PyObject *args)
{
  PyArrayObject *py_map_triplets, *py_map_q, *py_grid_address;
  PyArrayObject *py_mesh, *py_rotations;
  int fixed_grid_number, is_time_reversal, swappable;
  int num_ir;

  if (!PyArg_ParseTuple(args, "OOOiOiOi",
                        &py_map_triplets,
                        &py_map_q,
                        &py_grid_address,
                        &fixed_grid_number,
                        &py_mesh,
                        &is_time_reversal,
                        &py_rotations,
                        &swappable)) {
    return NULL;
  }

  num_ir = tpl_get_triplets_reciprocal_mesh_at_q(
      (int*)PyArray_DATA(py_map_triplets),
      (int*)PyArray_DATA(py_map_q),
      (int(*)[3])PyArray_DATA(py_grid_address),
      fixed_grid_number,
      (int*)PyArray_DATA(py_mesh),
      is_time_reversal,
      (int)PyArray_DIMS(py_rotations)[0],          /* num_rot */
      (int(*)[3][3])PyArray_DATA(py_rotations),
      swappable);
  return PyLong_FromLong((long) num_ir);
}
/* Python wrapper: expand irreducible triplets at one grid point onto the
 * Brillouin-zone grid.  Fills py_triplets (shape (n, 3)) in place and
 * returns the number of irreducible triplets. */
static PyObject * py_tpl_get_BZ_triplets_at_q(PyObject *self, PyObject *args)
{
  PyArrayObject *py_triplets, *py_bz_grid_address, *py_bz_map;
  PyArrayObject *py_map_triplets, *py_mesh;
  int grid_point;
  int num_ir;

  if (!PyArg_ParseTuple(args, "OiOOOO",
                        &py_triplets,
                        &grid_point,
                        &py_bz_grid_address,
                        &py_bz_map,
                        &py_map_triplets,
                        &py_mesh)) {
    return NULL;
  }

  num_ir = tpl_get_BZ_triplets_at_q(
      (int(*)[3])PyArray_DATA(py_triplets),
      grid_point,
      (int(*)[3])PyArray_DATA(py_bz_grid_address),
      (int*)PyArray_DATA(py_bz_map),
      (int*)PyArray_DATA(py_map_triplets),
      (int)PyArray_DIMS(py_map_triplets)[0],       /* num_map_triplets */
      (int*)PyArray_DATA(py_mesh));
  return PyLong_FromLong((long) num_ir);
}
/* Compute tetrahedron-method integration weights for q-point triplets.
 *
 * Fills iw (and the iw_zero mask of weights that vanish) in place via
 * tpl_get_integration_weight.  The trailing literal arguments (1, 0) are
 * mode flags of that kernel -- presumably selecting the weight function and
 * an openmp/at-q switch; verify against tpl_get_integration_weight's
 * signature before changing them. */
static PyObject *
py_set_triplets_integration_weights(PyObject *self, PyObject *args)
{
  PyArrayObject *py_iw;
  PyArrayObject *py_iw_zero;
  PyArrayObject *py_frequency_points;
  PyArrayObject *py_relative_grid_address;
  PyArrayObject *py_mesh;
  PyArrayObject *py_triplets;
  PyArrayObject *py_frequencies;
  PyArrayObject *py_bz_grid_address;
  PyArrayObject *py_bz_map;
  double *iw;
  char *iw_zero;
  double *frequency_points;
  int num_band0;
  /* (24 tetrahedra, 4 vertices, 3 coords) relative addresses. */
  int (*relative_grid_address)[4][3];
  int *mesh;
  int (*triplets)[3];
  int num_triplets;
  int (*bz_grid_address)[3];
  int *bz_map;
  double *frequencies;
  int num_band;
  int num_iw;
  if (!PyArg_ParseTuple(args, "OOOOOOOOO",
                        &py_iw,
                        &py_iw_zero,
                        &py_frequency_points,
                        &py_relative_grid_address,
                        &py_mesh,
                        &py_triplets,
                        &py_frequencies,
                        &py_bz_grid_address,
                        &py_bz_map)) {
    return NULL;
  }
  iw = (double*)PyArray_DATA(py_iw);
  iw_zero = (char*)PyArray_DATA(py_iw_zero);
  frequency_points = (double*)PyArray_DATA(py_frequency_points);
  num_band0 = PyArray_DIMS(py_frequency_points)[0];
  relative_grid_address = (int(*)[4][3])PyArray_DATA(py_relative_grid_address);
  mesh = (int*)PyArray_DATA(py_mesh);
  triplets = (int(*)[3])PyArray_DATA(py_triplets);
  num_triplets = PyArray_DIMS(py_triplets)[0];
  bz_grid_address = (int(*)[3])PyArray_DATA(py_bz_grid_address);
  bz_map = (int*)PyArray_DATA(py_bz_map);
  frequencies = (double*)PyArray_DATA(py_frequencies);
  /* frequencies: (num_grid_points, num_band). */
  num_band = PyArray_DIMS(py_frequencies)[1];
  /* Leading iw axis distinguishes the weight channels to compute. */
  num_iw = PyArray_DIMS(py_iw)[0];
  tpl_get_integration_weight(iw,
                             iw_zero,
                             frequency_points,
                             num_band0,
                             relative_grid_address,
                             mesh,
                             triplets,
                             num_triplets,
                             bz_grid_address,
                             bz_map,
                             frequencies,
                             num_band,
                             num_iw,
                             1,
                             0);
  Py_RETURN_NONE;
}
/* Python wrapper: smearing-method (Gaussian sigma) integration weights for
 * q-point triplets; fills iw and the iw_zero mask in place. */
static PyObject *
py_set_triplets_integration_weights_with_sigma(PyObject *self, PyObject *args)
{
  PyArrayObject *py_iw, *py_iw_zero, *py_frequency_points;
  PyArrayObject *py_triplets, *py_frequencies;
  double sigma, sigma_cutoff;

  if (!PyArg_ParseTuple(args, "OOOOOdd",
                        &py_iw,
                        &py_iw_zero,
                        &py_frequency_points,
                        &py_triplets,
                        &py_frequencies,
                        &sigma,
                        &sigma_cutoff)) {
    return NULL;
  }

  tpl_get_integration_weight_with_sigma(
      (double*)PyArray_DATA(py_iw),
      (char*)PyArray_DATA(py_iw_zero),
      sigma,
      sigma_cutoff,
      (double*)PyArray_DATA(py_frequency_points),
      (int)PyArray_DIMS(py_frequency_points)[0],   /* num_band0 */
      (int(*)[3])PyArray_DATA(py_triplets),
      (int)PyArray_DIMS(py_triplets)[0],           /* num_triplets */
      (double*)PyArray_DATA(py_frequencies),
      (int)PyArray_DIMS(py_frequencies)[1],        /* num_band */
      (int)PyArray_DIMS(py_iw)[0]);                /* num_iw */
  Py_RETURN_NONE;
}
#ifdef LIBFLAME
/* Pseudo-invert one (sigma, temperature) block of the collision matrix in
 * place using libflame's eigensolver-based pinv.  The collision matrix is
 * shaped (sigma, temp, ir_grid, band, 3, ir_grid, band, 3) -- presumably;
 * verify against the Python caller -- so one block is a square matrix of
 * order num_ir_grid_points * num_band * 3. */
static PyObject * py_inverse_collision_matrix_libflame(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  PyArrayObject *py_eigenvalues;
  int i_sigma, i_temp;
  double cutoff;            /* eigenvalues below this are dropped from pinv */
  double *collision_matrix;
  double *eigvals;
  int num_temp;
  int num_ir_grid_points;
  int num_band;
  int num_column;
  long adrs_shift;

  if (!PyArg_ParseTuple(args, "OOiid",
                        &py_collision_matrix,
                        &py_eigenvalues,
                        &i_sigma,
                        &i_temp,
                        &cutoff)) {
    return NULL;
  }
  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  eigvals = (double*)PyArray_DATA(py_eigenvalues);
  num_temp = PyArray_DIMS(py_collision_matrix)[1];
  num_ir_grid_points = PyArray_DIMS(py_collision_matrix)[2];
  num_band = PyArray_DIMS(py_collision_matrix)[3];
  num_column = num_ir_grid_points * num_band * 3;
  /* BUG FIX: promote to long before multiplying.  The former all-int
   * expression num_column^2 * num_temp overflows 32-bit int for realistic
   * matrix sizes even though the result is stored in a long. */
  adrs_shift = ((long)i_sigma * num_column * num_column * num_temp +
                (long)i_temp * num_column * num_column);
  phonopy_pinv_libflame(collision_matrix + adrs_shift,
                        eigvals, num_column, cutoff);
  Py_RETURN_NONE;
}
#endif
/* Diagonalize one (sigma, temperature) block of the collision matrix in
 * place with LAPACK dsyev (solver selects the variant), storing eigenvalues
 * in py_eigenvalues.  If is_pinv is nonzero, the block is additionally
 * replaced by its pseudo-inverse built from the eigensolution (pinv_method
 * 0: compare |eigenvalue| against cutoff).  Returns dsyev's info code. */
static PyObject *
py_diagonalize_collision_matrix(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  PyArrayObject *py_eigenvalues;
  double cutoff;
  int i_sigma, i_temp, is_pinv, solver;
  double *collision_matrix;
  double *eigvals;
  int num_temp;
  int num_grid_point;
  int num_band;
  int num_column, info;
  long adrs_shift;

  if (!PyArg_ParseTuple(args, "OOiidii",
                        &py_collision_matrix,
                        &py_eigenvalues,
                        &i_sigma,
                        &i_temp,
                        &cutoff,
                        &solver,
                        &is_pinv)) {
    return NULL;
  }

  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  eigvals = (double*)PyArray_DATA(py_eigenvalues);
  num_temp = PyArray_DIM(py_collision_matrix, 1);
  num_grid_point = PyArray_DIM(py_collision_matrix, 2);
  num_band = PyArray_DIM(py_collision_matrix, 3);
  /* ndim == 8: the matrix carries Cartesian axes (..., 3, grid, band, 3),
   * so each block row has num_grid_point * num_band * 3 columns. */
  if (PyArray_NDIM(py_collision_matrix) == 8) {
    num_column = num_grid_point * num_band * 3;
  } else {
    num_column = num_grid_point * num_band;
  }
  /* BUG FIX: promote to long before multiplying.  The former all-int
   * expression num_column^2 * num_temp overflows 32-bit int for realistic
   * matrix sizes even though the result is stored in a long. */
  adrs_shift = ((long)i_sigma * num_column * num_column * num_temp +
                (long)i_temp * num_column * num_column);
  /* show_colmat_info(py_collision_matrix, i_sigma, i_temp, adrs_shift); */
  info = phonopy_dsyev(collision_matrix + adrs_shift,
                       eigvals, num_column, solver);
  if (is_pinv) {
    pinv_from_eigensolution(collision_matrix + adrs_shift,
                            eigvals, num_column, cutoff, 0);
  }
  return PyLong_FromLong((long) info);
}
/* Replace one (sigma, temperature) block of an already-diagonalized
 * collision matrix by its pseudo-inverse, reconstructed from the stored
 * eigenvectors and the eigenvalues in py_eigenvalues.  pinv_method 0 keeps
 * eigenvalues with |e| > cutoff; method 1 keeps only e > cutoff. */
static PyObject * py_pinv_from_eigensolution(PyObject *self, PyObject *args)
{
  PyArrayObject *py_collision_matrix;
  PyArrayObject *py_eigenvalues;
  double cutoff;
  int i_sigma, i_temp, pinv_method;
  double *collision_matrix;
  double *eigvals;
  int num_temp;
  int num_grid_point;
  int num_band;
  int num_column;
  long adrs_shift;

  if (!PyArg_ParseTuple(args, "OOiidi",
                        &py_collision_matrix,
                        &py_eigenvalues,
                        &i_sigma,
                        &i_temp,
                        &cutoff,
                        &pinv_method)) {
    return NULL;
  }
  collision_matrix = (double*)PyArray_DATA(py_collision_matrix);
  eigvals = (double*)PyArray_DATA(py_eigenvalues);
  num_temp = PyArray_DIMS(py_collision_matrix)[1];
  num_grid_point = PyArray_DIMS(py_collision_matrix)[2];
  num_band = PyArray_DIMS(py_collision_matrix)[3];
  /* ndim == 8: Cartesian axes present, block order is grid*band*3. */
  if (PyArray_NDIM(py_collision_matrix) == 8) {
    num_column = num_grid_point * num_band * 3;
  } else {
    num_column = num_grid_point * num_band;
  }
  /* BUG FIX: promote to long before multiplying.  The former all-int
   * expression num_column^2 * num_temp overflows 32-bit int for realistic
   * matrix sizes even though the result is stored in a long. */
  adrs_shift = ((long)i_sigma * num_column * num_column * num_temp +
                (long)i_temp * num_column * num_column);
  /* show_colmat_info(py_collision_matrix, i_sigma, i_temp, adrs_shift); */
  pinv_from_eigensolution(collision_matrix + adrs_shift,
                          eigvals, num_column, cutoff, pinv_method);
  Py_RETURN_NONE;
}
/* Report the default collision-matrix eigensolver ID to Python:
 * 1 when built against MKL LAPACKE, otherwise 4. */
static PyObject * py_get_default_colmat_solver(PyObject *self, PyObject *args)
{
  if (!PyArg_ParseTuple(args, "")) {
    return NULL;
  }
#ifdef MKL_LAPACKE
  return PyLong_FromLong(1L);
#else
  return PyLong_FromLong(4L);
#endif
}
/* Fill one symmetric row-pair of the pseudo-inverse:
 * data[row][j] = data[j][row] = sum_k v[row][l_k] * v[j][l_k] / e[l_k]
 * over the kept eigenvalue indices l_0..l_{max_l-1}, where v (tmp_data) holds
 * the eigenvectors and only columns j >= row are computed (symmetry fills
 * the rest). */
static void pinv_fill_row(double *data,
                          const double *tmp_data,
                          const double *eigvals,
                          const int *l,
                          const int max_l,
                          const int row,
                          const int size)
{
  int j, k;
  double sum;

  for (j = row; j < size; j++) {
    sum = 0;
    for (k = 0; k < max_l; k++) {
      sum += tmp_data[row * size + l[k]] * tmp_data[j * size + l[k]]
        / eigvals[l[k]];
    }
    data[row * size + j] = sum;
    data[j * size + row] = sum;
  }
}

/* Overwrite data (size x size, holding the eigenvectors of the original
 * matrix) with the pseudo-inverse reconstructed from eigvals.
 * pinv_method 0 keeps eigenvalues with fabs(e) > cutoff; any other method
 * keeps only e > cutoff (discarding negative ones). */
static void pinv_from_eigensolution(double *data,
                                    const double *eigvals,
                                    const int size,
                                    const double cutoff,
                                    const int pinv_method)
{
  int i, num_kept;
  double e;
  double *evecs;
  int *kept;

  /* Copy the eigenvectors aside; data is rewritten in place below. */
  evecs = (double*)malloc(sizeof(double) * size * size);
#pragma omp parallel for
  for (i = 0; i < size * size; i++) {
    evecs[i] = data[i];
  }

  /* Collect indices of eigenvalues that survive the cutoff. */
  kept = (int*)malloc(sizeof(int) * size);
  num_kept = 0;
  for (i = 0; i < size; i++) {
    e = (pinv_method == 0) ? fabs(eigvals[i]) : eigvals[i];
    if (e > cutoff) {
      kept[num_kept] = i;
      num_kept++;
    }
  }

  /* Each iteration handles one row from the front and its mirror row from
   * the back, balancing the triangular workload across threads. */
#pragma omp parallel for
  for (i = 0; i < size / 2; i++) {
    pinv_fill_row(data, evecs, eigvals, kept, num_kept, i, size);
    pinv_fill_row(data, evecs, eigvals, kept, num_kept, size - i - 1, size);
  }
  /* Odd size: the middle row is not covered by the pairing above. */
  if ((size % 2) == 1) {
    pinv_fill_row(data, evecs, eigvals, kept, num_kept, (size - 1) / 2, size);
  }

  free(kept);
  kept = NULL;
  free(evecs);
  evecs = NULL;
}
/* Debug helper: print the collision-matrix array shape and the flat data
 * offset used for the (i_sigma, i_temp) block. */
static void show_colmat_info(const PyArrayObject *py_collision_matrix,
                             const int i_sigma,
                             const int i_temp,
                             const long adrs_shift)
{
  int i, ndim;

  ndim = PyArray_NDIM(py_collision_matrix);
  printf(" Array_shape:(");
  for (i = 0; i < ndim; i++) {
    printf("%d", (int)PyArray_DIM(py_collision_matrix, i));
    if (i < ndim - 1) {
      printf(",");
    } else {
      printf("), ");
    }
  }
  printf("Data shift:%ld [%d, %d]\n", adrs_shift, i_sigma, i_temp);
}
|
4103.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
/* Deterministically seed the four input matrices of the 3mm kernel.
 * A: (ni x nk), B: (nk x nj), C: (nj x nm), D: (nm x nl).
 * The cross-use of divisors (C scaled by nl, D by nk) matches the upstream
 * PolyBench 3mm initializer and is intentional -- it only affects value
 * magnitudes, not which cells are written. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
                DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
                DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nk; j++)
      A[i][j] = ((DATA_TYPE) i*j) / ni;
  for (i = 0; i < nk; i++)
    for (j = 0; j < nj; j++)
      B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
  for (i = 0; i < nj; i++)
    for (j = 0; j < nm; j++)
      C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
  for (i = 0; i < nm; i++)
    for (j = 0; j < nl; j++)
      D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the live-out matrix G (ni x nl) to stderr so the compiler cannot
 * dead-code-eliminate the kernel; also usable for output comparison.
 * A newline is emitted every 20 values; the index expression uses ni (not
 * nl) as the row stride, matching the upstream PolyBench print routine. */
static
void print_array(int ni, int nl,
                 DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
      if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Triple matrix product G := (A*B) * (C*D), offloaded per-row with OpenMP
 * target teams distribute.  The whole function is timed by the harness.
 *
 * BUG FIX: `k` is declared outside the target regions, so without an
 * explicit clause it is shared across the distributed teams; concurrent
 * j-iterations then race on the shared inner-loop counter and corrupt the
 * results.  `private(k)` on each distribute construct gives every team its
 * own counter.  (`j`, as the distributed loop variable, is already
 * implicitly private.) */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
                DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
                DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
                DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
                DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
                DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j, k;
#pragma scop
  {
    /* E := A*B */
    for (i = 0; i < _PB_NI; i++)
    {
#pragma omp target teams distribute private(k)
      for (j = 0; j < _PB_NJ; j++)
      {
        E[i][j] = 0;
        for (k = 0; k < _PB_NK; ++k)
          E[i][j] += A[i][k] * B[k][j];
      }
    }
    /* F := C*D */
    for (i = 0; i < _PB_NJ; i++)
    {
#pragma omp target teams distribute private(k)
      for (j = 0; j < _PB_NL; j++)
      {
        F[i][j] = 0;
        for (k = 0; k < _PB_NM; ++k)
          F[i][j] += C[i][k] * D[k][j];
      }
    }
    /* G := E*F */
    for (i = 0; i < _PB_NI; i++)
    {
#pragma omp target teams distribute private(k)
      for (j = 0; j < _PB_NL; j++)
      {
        G[i][j] = 0;
        for (k = 0; k < _PB_NJ; ++k)
          G[i][j] += E[i][k] * F[k][j];
      }
    }
  }
#pragma endscop
}
/* Benchmark driver: allocate, initialize, time kernel_3mm, print live-out
 * data (to defeat DCE), free.  All sizing and allocation goes through
 * PolyBench macros (POLYBENCH_2D_ARRAY_DECL presumably heap-allocates when
 * POLYBENCH_STACK_ARRAYS is unset -- defined in polybench.h). */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  int nk = NK;
  int nl = NL;
  int nm = NM;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
  POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
  POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
  POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
  POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
  /* Initialize array(s).  E, F, G are outputs and are zeroed inside the
   * kernel itself. */
  init_array (ni, nj, nk, nl, nm,
              POLYBENCH_ARRAY(A),
              POLYBENCH_ARRAY(B),
              POLYBENCH_ARRAY(C),
              POLYBENCH_ARRAY(D));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel.  The timed region includes call and return. */
  kernel_3mm (ni, nj, nk, nl, nm,
              POLYBENCH_ARRAY(E),
              POLYBENCH_ARRAY(A),
              POLYBENCH_ARRAY(B),
              POLYBENCH_ARRAY(F),
              POLYBENCH_ARRAY(C),
              POLYBENCH_ARRAY(D),
              POLYBENCH_ARRAY(G));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(E);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  POLYBENCH_FREE_ARRAY(F);
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(D);
  POLYBENCH_FREE_ARRAY(G);
  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.