source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
/* GCC OpenMP dg testcase: 'depend' clauses on '#pragma omp taskwait'.
   The stray "taskwait-depend-1.c |" prefix is table residue from the
   dataset dump, not C source — TODO confirm against the original file. */
taskwait-depend-1.c | void
foo (int *p)
{
/* Valid: iterator-expanded 'in' dependences over p[0..15] plus an 'out' on p[32]. */
#pragma omp taskwait depend(iterator(i = 0:16) , in : p[i]) depend(out : p[32])
}
void
bar (int *p)
{
/* Deliberately invalid: 'mutexinoutset' is rejected on 'taskwait'; the
   dg-error annotation below matches the compiler's expected diagnostic. */
#pragma omp taskwait depend(mutexinoutset : p[0]) /* { dg-error "'mutexinoutset' kind in 'depend' clause on a 'taskwait' construct" } */
}
|
ZQ_CNN_MTCNN.h | #ifndef _ZQ_CNN_MTCNN_H_
#define _ZQ_CNN_MTCNN_H_
#pragma once
#include "ZQ_CNN_Net.h"
#include "ZQ_CNN_BBoxUtils.h"
#include <omp.h>
namespace ZQ
{
class ZQ_CNN_MTCNN
{
public:
using string = std::string;
// Construct with the stock MTCNN defaults; callers are expected to invoke
// Init()/InitFromBuffer() and SetPara() before running Find().
ZQ_CNN_MTCNN()
{
	// No image size configured yet — SetPara() fills these in.
	width = 0;
	height = 0;
	// Detection geometry defaults.
	min_size = 60;
	factor = 0.709;
	pnet_size = 12;
	pnet_stride = 2;
	pnet_overlap_thresh_count = 4;
	// Per-stage acceptance thresholds (P, R, O).
	thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7;
	// Per-stage NMS thresholds (P, R, O).
	nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7;
	// Feature toggles, all off by default.
	special_handle_very_big_face = false;
	force_run_pnet_multithread = false;
	show_debug_info = false;
}
// All members manage their own storage (std::vector / tensor types),
// so there is nothing to release explicitly.
~ZQ_CNN_MTCNN()
{
}
private:
// One network instance per worker thread (indexed by the OpenMP thread id in
// the stage functions below).
std::vector<ZQ_CNN_Net> pnet, rnet, onet, lnet;
// Whether a landmark-refinement net (lnet) was loaded.
bool has_lnet;
// Number of loaded net copies; 0 means Init failed / not initialized.
int thread_num;
// Per-stage score thresholds and NMS thresholds, indexed P=0, R=1, O=2.
float thresh[3], nms_thresh[3];
// Smallest face side to detect (clamped to >= pnet_size by SetPara).
int min_size;
// Expected input image size; _Pnet_stage rejects other sizes.
int width, height;
// Image-pyramid scale step used when building 'scales'.
float factor;
int pnet_overlap_thresh_count;
// Pnet receptive-field size and stride (defaults 12 / 2).
int pnet_size;
int pnet_stride;
// Input resolutions of Rnet/Onet/Lnet, read back from the loaded models.
int rnet_size;
int onet_size;
int lnet_size;
// Adds extra coarse pyramid levels so very large faces are not missed.
bool special_handle_very_big_face;
bool do_landmark;
// When not doing landmarks, Rnet boxes scoring above this skip Onet entirely.
float early_accept_thresh;
// NMS threshold applied inside each pyramid scale (set by SetPara).
float nms_thresh_per_scale;
// Use the multi-threaded Pnet path even when thread_num == 1.
bool force_run_pnet_multithread;
// Cached pyramid scales and the per-scale resized images (built by SetPara /
// the Pnet stage).
std::vector<float> scales;
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> pnet_images;
ZQ_CNN_Tensor4D_NHW_C_Align128bit input, rnet_image, onet_image;
bool show_debug_info;
public:
// Toggle the verbose timing / candidate-count diagnostics printed by the
// detection stages.
void TurnOnShowDebugInfo() { show_debug_info = true; }
void TurnOffShowDebugInfo() { show_debug_info = false; }
// Load Pnet/Rnet/Onet (and optionally Lnet) from param/model files, one copy
// per worker thread.  thread_num < 1 requests the multi-threaded Pnet path
// while still allocating a single net copy.  On any load failure all nets are
// cleared, thread_num is zeroed and false is returned.
bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model,
	const string& onet_param, const string& onet_model, int thread_num = 1,
	bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "")
{
	force_run_pnet_multithread = (thread_num < 1);
	thread_num = __max(1, thread_num);
	pnet.resize(thread_num);
	rnet.resize(thread_num);
	onet.resize(thread_num);
	this->has_lnet = has_lnet;
	if (has_lnet)
	{
		lnet.resize(thread_num);
	}
	bool ret = true;
	for (int i = 0; i < thread_num; i++)
	{
		ret = pnet[i].LoadFrom(pnet_param, pnet_model) && rnet[i].LoadFrom(rnet_param, rnet_model) && onet[i].LoadFrom(onet_param, onet_model);
		if (has_lnet && ret)
			ret = lnet[i].LoadFrom(lnet_param, lnet_model);
		if (!ret)
			break;
	}
	if (!ret)
	{
		// BUG FIX: the original cleared the vectors here but then fell
		// through and dereferenced rnet[0]/onet[0] below — undefined
		// behavior on a failed load.  Bail out immediately instead.
		pnet.clear();
		rnet.clear();
		onet.clear();
		if (has_lnet)
			lnet.clear();
		this->thread_num = 0;
		return false;
	}
	this->thread_num = thread_num;
	if (show_debug_info)
	{
		printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0),
			onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
		if (has_lnet)
			printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
	}
	// Record each net's input resolution (H is used as the square side).
	int C, H, W;
	rnet[0].GetInputDim(C, H, W);
	rnet_size = H;
	onet[0].GetInputDim(C, H, W);
	onet_size = H;
	if (has_lnet)
	{
		lnet[0].GetInputDim(C, H, W);
		lnet_size = H;
	}
	return true;
}
bool InitFromBuffer(
const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len,
const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len,
const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len,
int thread_num = 1, bool has_lnet = false,
const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0)
{
if (thread_num < 1)
force_run_pnet_multithread = true;
else
force_run_pnet_multithread = false;
thread_num = __max(1, thread_num);
pnet.resize(thread_num);
rnet.resize(thread_num);
onet.resize(thread_num);
this->has_lnet = has_lnet;
if(has_lnet)
lnet.resize(thread_num);
bool ret = true;
for (int i = 0; i < thread_num; i++)
{
ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len,pnet_model,pnet_model_len)
&& rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len)
&& onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len);
if (has_lnet && ret)
ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len);
if (!ret)
break;
}
if (!ret)
{
pnet.clear();
rnet.clear();
onet.clear();
if (has_lnet)
lnet.clear();
this->thread_num = 0;
}
else
this->thread_num = thread_num;
if (show_debug_info)
{
printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0),
onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
if (has_lnet)
printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
}
int C, H, W;
rnet[0].GetInputDim(C, H, W);
rnet_size = H;
onet[0].GetInputDim(C, H, W);
onet_size = H;
return ret;
}
// Configure the detector for w x h input images: thresholds, NMS settings,
// minimum face size, and the image pyramid ('scales' / 'pnet_images').
// The pyramid is rebuilt only when the image size or scale factor changes.
void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7,
	float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709,
	int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false,
	bool do_landmark = true, float early_accept_thresh = 1.00)
{
	min_size = __max(pnet_size, min_face_size);
	// Clamp thresholds away from zero so garbage inputs can't accept everything.
	thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh);
	nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh);
	scale_factor = __max(0.5, __min(0.97, scale_factor));
	this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count);
	this->pnet_size = pnet_size;
	this->pnet_stride = pnet_stride;
	this->special_handle_very_big_face = special_handle_very_big_face;
	this->do_landmark = do_landmark;
	this->early_accept_thresh = early_accept_thresh;
	// The 20/4 Pnet variant uses a slightly tighter per-scale NMS.
	if (pnet_size == 20 && pnet_stride == 4)
		nms_thresh_per_scale = 0.45;
	else
		nms_thresh_per_scale = 0.495;
	if (width != w || height != h || factor != scale_factor)
	{
		scales.clear();
		pnet_images.clear();
		width = w; height = h;
		// BUG FIX: the original never stored the clamped scale_factor, so the
		// pyramid was always built with the stale 'factor' value and this
		// rebuild branch re-triggered on every call that passed a
		// non-default scale_factor.
		factor = scale_factor;
		float minside = __min(width, height);
		int MIN_DET_SIZE = pnet_size;
		float m = (float)MIN_DET_SIZE / min_size;
		minside *= m;
		// Standard MTCNN pyramid: shrink by 'factor' until the short side
		// reaches the Pnet window.
		while (minside > MIN_DET_SIZE)
		{
			scales.push_back(m);
			minside *= factor;
			m *= factor;
		}
		minside = __min(width, height);
		int count = scales.size();
		// Count off trailing scales whose scaled short side is <= pnet_size.
		for (int i = scales.size() - 1; i >= 0; i--)
		{
			if (ceil(scales[i] * minside) <= pnet_size)
			{
				count--;
			}
		}
		if (special_handle_very_big_face)
		{
			if (count > 2)
				count--;
			scales.resize(count);
			if (count > 0)
			{
				// Densify the coarse end of the pyramid so very large faces
				// are not missed between consecutive octaves.
				float last_size = ceil(scales[count - 1] * minside);
				for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2)
				{
					scales.push_back((float)tmp_size / minside);
					count++;
				}
			}
			scales.push_back((float)pnet_size / minside);
			count++;
		}
		else
		{
			scales.push_back((float)pnet_size / minside);
			count++;
		}
		pnet_images.resize(count);
	}
}
// Run the full P -> R -> O (-> L when available and do_landmark) cascade on a
// BGR image.  The image size must match the w/h given to SetPara (checked in
// _Pnet_stage).  Returns false when any stage fails or yields no candidate.
bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results)
{
	double t1 = omp_get_wtime();
	std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
	if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox))
		return false;
	double t2 = omp_get_wtime();
	if (!_Rnet_stage(firstBbox, secondBbox))
		return false;
	if (!has_lnet || !do_landmark)
	{
		// No landmark net: Onet writes the final results directly.
		double t3 = omp_get_wtime();
		if (!_Onet_stage(secondBbox, results))
			return false;
		double t4 = omp_get_wtime();
		if (show_debug_info)
		{
			// BUG FIX: size() is size_t; passing it to %d is undefined on
			// LP64 targets — cast explicitly.
			printf("final found num: %d\n", (int)results.size());
			printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n",
				1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3));
		}
	}
	else
	{
		// With a landmark net, Onet feeds thirdBbox into the Lnet refinement.
		double t3 = omp_get_wtime();
		if (!_Onet_stage(secondBbox, thirdBbox))
			return false;
		double t4 = omp_get_wtime();
		if (!_Lnet_stage(thirdBbox, results))
			return false;
		double t5 = omp_get_wtime();
		if (show_debug_info)
		{
			printf("final found num: %d\n", (int)results.size());
			printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
				1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
		}
	}
	return true;
}
// 106-point variant of Find(): requires the Lnet106 landmark net, so it fails
// fast when has_lnet/do_landmark is not enabled.  Same size contract as Find().
bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results)
{
	double t1 = omp_get_wtime();
	std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
	if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox))
		return false;
	double t2 = omp_get_wtime();
	if (!_Rnet_stage(firstBbox, secondBbox))
		return false;
	// The 106-landmark output only exists on the Lnet path.
	if (!has_lnet || !do_landmark)
	{
		return false;
	}
	double t3 = omp_get_wtime();
	if (!_Onet_stage(secondBbox, thirdBbox))
		return false;
	double t4 = omp_get_wtime();
	if (!_Lnet106_stage(thirdBbox, results))
		return false;
	double t5 = omp_get_wtime();
	if (show_debug_info)
	{
		// BUG FIX: cast size_t to int for the %d conversion (UB otherwise on
		// 64-bit targets).
		printf("final found num: %d\n", (int)results.size());
		printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
			1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
	}
	return true;
}
private:
// Run Pnet over the image pyramid on the calling thread: for each usable
// scale, resize 'input' into pnet_images[i] (skipped when the scale is 1),
// forward pnet[0], and copy the face-probability channel of "prob1" into
// maps[i], a mapH[i] x mapW[i] row-major grid.
void _compute_Pnet_single_thread(std::vector<std::vector<float>>& maps,
std::vector<int>& mapH, std::vector<int>& mapW)
{
int scale_num = 0;
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
scale_num++;
mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
}
// NOTE(review): mapH/mapW are compacted (skipped scales omitted) but the
// loops below index scales[i] with the same i — this only lines up if no
// scale is ever skipped.  Presumably SetPara guarantees every scale yields a
// window >= pnet_size; confirm.
maps.resize(scale_num);
for (int i = 0; i < scale_num; i++)
{
maps[i].resize(mapH[i] * mapW[i]);
}
for (int i = 0; i < scale_num; i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
float cur_scale_x = (float)width / changedW;
float cur_scale_y = (float)height / changedH;
double t10 = omp_get_wtime();
// Scale 1 forwards 'input' directly; pnet_images[i] stays untouched then.
if (scales[i] != 1)
{
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
}
double t11 = omp_get_wtime();
if (scales[i] != 1)
pnet[0].Forward(pnet_images[i]);
else
pnet[0].Forward(input);
double t12 = omp_get_wtime();
if (show_debug_info)
printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n",
i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11));
const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1");
//score p
// '+ 1' selects channel 1 (face probability) of each output pixel; only the
// top-left mapH[i] x mapW[i] window of the score map is kept.
int scoreH = score->GetH();
int scoreW = score->GetW();
int scorePixStep = score->GetPixelStep();
const float *p = score->GetFirstPixelPtr() + 1;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
if(row < mapH[i] && col < mapW[i])
maps[i][row*mapW[i] + col] = *p;
p += scorePixStep;
}
}
}
}
// Multi-threaded Pnet: first resize every pyramid level in parallel, then
// split each level into overlapping fixed-size blocks ("tasks") and forward
// each block through the per-thread pnet copy.  Each task writes its scores
// into a disjoint region of the shared 'maps', so no synchronization is
// needed on the output.
void _compute_Pnet_multi_thread(std::vector<std::vector<float>>& maps,
std::vector<int>& mapH, std::vector<int>& mapW)
{
// Pass 1: build the per-scale resized images in parallel.
#pragma omp parallel for num_threads(thread_num)
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
if (scales[i] != 1)
{
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
}
}
// Pass 2: size the output score maps (same index-compaction caveat as the
// single-thread path: assumes no scale is actually skipped).
int scale_num = 0;
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
scale_num++;
mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
}
maps.resize(scale_num);
for (int i = 0; i < scale_num; i++)
{
maps[i].resize(mapH[i] * mapW[i]);
}
// Pass 3: carve every scale into overlapping blocks.  Blocks overlap by
// border_size so each cellsize window is fully contained in some block.
std::vector<int> task_rect_off_x;
std::vector<int> task_rect_off_y;
std::vector<int> task_rect_width;
std::vector<int> task_rect_height;
std::vector<float> task_scale;
std::vector<int> task_scale_id;
int stride = pnet_stride;
const int block_size = 64 * stride;
int cellsize = pnet_size;
int border_size = cellsize - stride;
int overlap_border_size = cellsize / stride;
int jump_size = block_size - border_size;
for (int i = 0; i < scales.size(); i++)
{
int changeH = (int)ceil(height*scales[i]);
int changeW = (int)ceil(width*scales[i]);
if (changeH < pnet_size || changeW < pnet_size)
continue;
int block_H_num = 0;
int block_W_num = 0;
int start = 0;
while (start < changeH)
{
block_H_num++;
if (start + block_size >= changeH)
break;
start += jump_size;
}
start = 0;
while (start < changeW)
{
block_W_num++;
if (start + block_size >= changeW)
break;
start += jump_size;
}
for (int s = 0; s < block_H_num; s++)
{
for (int t = 0; t < block_W_num; t++)
{
int rect_off_x = t * jump_size;
int rect_off_y = s * jump_size;
int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x;
int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y;
if (rect_width >= cellsize && rect_height >= cellsize)
{
task_rect_off_x.push_back(rect_off_x);
task_rect_off_y.push_back(rect_off_y);
task_rect_width.push_back(rect_width);
task_rect_height.push_back(rect_height);
task_scale.push_back(scales[i]);
task_scale_id.push_back(i);
}
}
}
}
//
// Pass 4: forward every task block through the per-thread pnet and scatter
// the scores into maps[scale_id].
int task_num = task_scale.size();
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_pnet_images(thread_num);
#pragma omp parallel for num_threads(thread_num)
for (int i = 0; i < task_num; i++)
{
int thread_id = omp_get_thread_num();
int scale_id = task_scale_id[i];
float cur_scale = task_scale[i];
int i_rect_off_x = task_rect_off_x[i];
int i_rect_off_y = task_rect_off_y[i];
int i_rect_width = task_rect_width[i];
int i_rect_height = task_rect_height[i];
// NOTE(review): the unscaled-image special case only fires for scale_id 0,
// whereas the single-thread path tests scales[i] != 1 for every scale —
// confirm a scale of exactly 1 can only appear at index 0.
if (scale_id == 0 && scales[0] == 1)
{
if (!input.ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
else
{
if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
continue;
const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1");
// task_count, bbox and order below are unused leftovers from an earlier
// version of this loop.
int task_count = 0;
//score p
int scoreH = score->GetH();
int scoreW = score->GetW();
int scorePixStep = score->GetPixelStep();
const float *p = score->GetFirstPixelPtr() + 1;
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
// Offset by the block's origin (in map cells) so each task writes its own
// disjoint region of the shared score map.
int real_row = row + i_rect_off_y / stride;
int real_col = col + i_rect_off_x / stride;
if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
p += scorePixStep;
}
}
}
}
// Pnet stage: convert the BGR image, compute per-scale score maps (single- or
// multi-threaded), threshold + per-scale NMS the candidate windows (blockwise
// and parallel for large maps), then a final cross-scale NMS and box
// refinement into firstBbox.  Returns false on size mismatch, conversion
// failure, or zero candidates.
bool _Pnet_stage(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& firstBbox)
{
if (thread_num <= 0)
return false;
double t1 = omp_get_wtime();
firstBbox.clear();
// The pyramid in 'scales'/'pnet_images' was built for (width, height);
// reject any other input size.
if (width != _width || height != _height)
return false;
if (!input.ConvertFromBGR(bgr_img, width, height, width * 3))
return false;
double t2 = omp_get_wtime();
if (show_debug_info)
printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
std::vector<std::vector<float>> maps;
std::vector<int> mapH;
std::vector<int> mapW;
if (thread_num == 1 && !force_run_pnet_multithread)
{
pnet[0].TurnOffShowDebugInfo();
//pnet[0].TurnOnShowDebugInfo();
_compute_Pnet_single_thread(maps, mapH, mapW);
}
else
{
_compute_Pnet_multi_thread(maps, mapH, mapW);
}
ZQ_CNN_OrderScore order;
std::vector<std::vector<ZQ_CNN_BBox>> bounding_boxes(scales.size());
std::vector<std::vector<ZQ_CNN_OrderScore>> bounding_scores(scales.size());
const int block_size = 32;
int stride = pnet_stride;
int cellsize = pnet_size;
int border_size = cellsize / stride;
for (int i = 0; i < maps.size(); i++)
{
double t13 = omp_get_wtime();
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
float cur_scale_x = (float)width / changedW;
float cur_scale_y = (float)height / changedH;
int count = 0;
//score p
int scoreH = mapH[i];
int scoreW = mapW[i];
const float *p = &maps[i][0];
// Small maps: threshold + NMS the whole map on this thread.
// NOTE(review): the test mixes <= for width with < for height — probably
// meant to be symmetric; harmless either way (just picks the code path).
if (scoreW <= block_size && scoreH < block_size)
{
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
if (*p > thresh[0])
{
bbox.score = *p;
order.score = *p;
order.oriOrder = count;
// Map cell -> pixel window in the scaled image.
bbox.row1 = stride*row;
bbox.col1 = stride*col;
bbox.row2 = stride*row + cellsize;
bbox.col2 = stride*col + cellsize;
bbox.exist = true;
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
&& (col >= border_size && col < scoreW - border_size);
bounding_boxes[i].push_back(bbox);
bounding_scores[i].push_back(order);
count++;
}
p ++;
}
}
int before_count = bounding_boxes[i].size();
ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
int after_count = bounding_boxes[i].size();
// Scale the surviving boxes back to original-image coordinates.
for (int j = 0; j < after_count; j++)
{
ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
bbox.row1 = round(bbox.row1 *cur_scale_y);
bbox.col1 = round(bbox.col1 *cur_scale_x);
bbox.row2 = round(bbox.row2 *cur_scale_y);
bbox.col2 = round(bbox.col2 *cur_scale_x);
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
}
double t14 = omp_get_wtime();
if (show_debug_info)
printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
}
else
{
// Large maps: split into blocks, threshold + NMS each block in parallel,
// then merge the per-block survivors.
int before_count = 0, after_count = 0;
int block_H_num = __max(1, scoreH / block_size);
int block_W_num = __max(1, scoreW / block_size);
int block_num = block_H_num*block_W_num;
int width_per_block = scoreW / block_W_num;
int height_per_block = scoreH / block_H_num;
std::vector<std::vector<ZQ_CNN_BBox>> tmp_bounding_boxes(block_num);
std::vector<std::vector<ZQ_CNN_OrderScore>> tmp_bounding_scores(block_num);
std::vector<int> block_start_w(block_num), block_end_w(block_num);
std::vector<int> block_start_h(block_num), block_end_h(block_num);
for (int bh = 0; bh < block_H_num; bh++)
{
for (int bw = 0; bw < block_W_num; bw++)
{
int bb = bh * block_W_num + bw;
block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size);
// NOTE(review): comparing bw/bh against block_num - 1 looks wrong — the
// last column/row should be bw == block_W_num - 1 / bh == block_H_num - 1;
// as written, when both dimensions have multiple blocks the final
// column/row may stop short of scoreW/scoreH, dropping candidates at the
// right/bottom edges.  Confirm against upstream.
block_end_w[bb] = (bw == block_num - 1) ? scoreW : ((bw + 1)*width_per_block);
block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size);
block_end_h[bb] = (bh == block_num - 1) ? scoreH : ((bh + 1)*height_per_block);
}
}
// NOTE(review): block_num / thread_num is integer division, so ceil() is a
// no-op here; chunk_size can be 0 when block_num < thread_num.
int chunk_size = ceil(block_num / thread_num);
#pragma omp parallel for schedule(static, chunk_size) num_threads(thread_num)
for (int bb = 0; bb < block_num; bb++)
{
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
int count = 0;
for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
{
p = &maps[i][0] + row*scoreW + block_start_w[bb];
for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
{
if (*p > thresh[0])
{
bbox.score = *p;
order.score = *p;
order.oriOrder = count;
bbox.row1 = stride*row;
bbox.col1 = stride*col;
bbox.row2 = stride*row + cellsize;
bbox.col2 = stride*col + cellsize;
bbox.exist = true;
bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
&& (col >= border_size && col < scoreW - border_size);
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
tmp_bounding_boxes[bb].push_back(bbox);
tmp_bounding_scores[bb].push_back(order);
count++;
}
p++;
}
}
int tmp_before_count = tmp_bounding_boxes[bb].size();
ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
int tmp_after_count = tmp_bounding_boxes[bb].size();
// NOTE(review): these shared debug counters are updated from multiple
// threads without a reduction — the totals printed below are racy (debug
// output only, detection results are unaffected).
before_count += tmp_before_count;
after_count += tmp_after_count;
}
// Merge the per-block survivors back into this scale's candidate list.
count = 0;
for (int bb = 0; bb < block_num; bb++)
{
std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin();
for (; it != tmp_bounding_boxes[bb].end(); it++)
{
if ((*it).exist)
{
bounding_boxes[i].push_back(*it);
order.score = (*it).score;
order.oriOrder = count;
bounding_scores[i].push_back(order);
count++;
}
}
}
//ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0);
after_count = bounding_boxes[i].size();
// Scale the surviving boxes back to original-image coordinates.
for (int j = 0; j < after_count; j++)
{
ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
bbox.row1 = round(bbox.row1 *cur_scale_y);
bbox.col1 = round(bbox.col1 *cur_scale_x);
bbox.row2 = round(bbox.row2 *cur_scale_y);
bbox.col2 = round(bbox.col2 *cur_scale_x);
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
}
double t14 = omp_get_wtime();
if (show_debug_info)
printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
}
}
// Collect surviving boxes from every scale, then run the cross-scale NMS and
// refine/square the boxes in original-image coordinates.
std::vector<ZQ_CNN_OrderScore> firstOrderScore;
int count = 0;
for (int i = 0; i < scales.size(); i++)
{
std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin();
for (; it != bounding_boxes[i].end(); it++)
{
if ((*it).exist)
{
firstBbox.push_back(*it);
order.score = (*it).score;
order.oriOrder = count;
firstOrderScore.push_back(order);
count++;
}
}
}
//the first stage's nms
if (count < 1) return false;
double t15 = omp_get_wtime();
ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1);
ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height,true);
double t16 = omp_get_wtime();
if (show_debug_info)
printf("nms cost: %.3f ms\n", 1000 * (t16 - t15));
if (show_debug_info)
printf("first stage candidate count: %d\n", count);
double t3 = omp_get_wtime();
if (show_debug_info)
printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t2));
return true;
}
// Rnet stage: filter Pnet candidates (drop out-of-image or too-small boxes),
// batch the survivors into per-task groups of at most 64, crop+resize each
// group to rnet_size and forward it through the per-thread rnet, keep boxes
// scoring above thresh[1], then "Min"-NMS and refine into secondBbox.
// Note: secondBbox is first used as scratch for the survivor list and is
// completely rebuilt from the task results afterwards.
bool _Rnet_stage(std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox)
{
double t3 = omp_get_wtime();
secondBbox.clear();
std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin();
std::vector<ZQ_CNN_OrderScore> secondScore;
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int r_count = 0;
for (; it != firstBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
// Reject boxes outside the image or smaller than half the minimum face.
if (off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height || rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
r_count++;
secondBbox.push_back(*it);
}
}
}
// Split survivors into tasks of at most batch_size boxes each.
int batch_size = 64;
int per_num = ceil((float)r_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)r_count / batch_size);
per_num = batch_size;
}
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_rnet_images(need_thread_num);
std::vector<std::vector<int>> task_src_off_x(need_thread_num);
std::vector<std::vector<int>> task_src_off_y(need_thread_num);
std::vector<std::vector<int>> task_src_rect_w(need_thread_num);
std::vector<std::vector<int>> task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox>> task_secondBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(r_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_secondBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_secondBbox[i][j] = secondBbox[st_id + j];
}
}
}
if (thread_num == 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
// NOTE(review): this tests the OUTER vector (always need_thread_num
// entries, never empty); it was presumably meant to skip empty tasks via
// task_src_off_x[pp].size() == 0 — confirm.  Empty tasks currently rely
// on ResizeBilinearRect failing instead.
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
rnet[0].Forward(task_rnet_images[pp]);
const ZQ_CNN_Tensor4D* score = rnet[0].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D* location = rnet[0].GetBlobByName("conv5-2");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int task_count = 0;
// Keep boxes over thresh[1]; record their regression offsets and scores.
for (int i = 0; i < task_secondBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[1])
{
for (int j = 0; j < 4; j++)
task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_secondBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_secondBbox[pp].clear();
continue;
}
// Compact: erase rejected boxes (reverse order keeps indices valid).
for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_secondBbox[pp][i].exist)
task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
}
}
}
else
{
// Same work as above, one task per OpenMP thread / rnet copy.
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
// NOTE(review): same outer-vector test as the single-thread branch.
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
rnet[thread_id].Forward(task_rnet_images[pp]);
const ZQ_CNN_Tensor4D* score = rnet[thread_id].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D* location = rnet[thread_id].GetBlobByName("conv5-2");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int task_count = 0;
for (int i = 0; i < task_secondBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[1])
{
for (int j = 0; j < 4; j++)
task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_secondBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_secondBbox[pp].clear();
continue;
}
for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_secondBbox[pp][i].exist)
task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
}
}
}
// Rebuild secondBbox from the per-task survivors and run the stage-2 NMS.
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_secondBbox[i].size();
}
secondBbox.resize(count);
secondScore.resize(count);
int id = 0;
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_secondBbox[i].size(); j++)
{
secondBbox[id] = task_secondBbox[i][j];
secondScore[id].score = secondBbox[id].score;
secondScore[id].oriOrder = id;
id++;
}
}
//ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union");
ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min");
ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true);
count = secondBbox.size();
double t4 = omp_get_wtime();
if (show_debug_info)
printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count);
if (show_debug_info)
printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3));
return true;
}
bool _Onet_stage(std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox)
{
double t4 = omp_get_wtime();
thirdBbox.clear();
std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin();
std::vector<ZQ_CNN_OrderScore> thirdScore;
std::vector<ZQ_CNN_BBox> early_accept_thirdBbox;
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int o_count = 0;
for (; it != secondBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
if (off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height || rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
if (!do_landmark && it->score > early_accept_thresh)
{
early_accept_thirdBbox.push_back(*it);
}
else
{
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
o_count++;
thirdBbox.push_back(*it);
}
}
}
}
int batch_size = 64;
int per_num = ceil((float)o_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)o_count / batch_size);
per_num = batch_size;
}
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_onet_images(need_thread_num);
std::vector<std::vector<int>> task_src_off_x(need_thread_num);
std::vector<std::vector<int>> task_src_off_y(need_thread_num);
std::vector<std::vector<int>> task_src_rect_w(need_thread_num);
std::vector<std::vector<int>> task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox>> task_thirdBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(o_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_thirdBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_thirdBbox[i][j] = thirdBbox[st_id + j];
}
}
}
if (thread_num == 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
onet[0].Forward(task_onet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D* score = onet[0].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D* location = onet[0].GetBlobByName("conv6-2");
const ZQ_CNN_Tensor4D* keyPoint = onet[0].GetBlobByName("conv6-3");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
const float* keyPoint_ptr = 0;
if (keyPoint != 0)
keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int keyPoint_sliceStep = 0;
if (keyPoint != 0)
keyPoint_sliceStep = keyPoint->GetSliceStep();
int task_count = 0;
ZQ_CNN_OrderScore order;
for (int i = 0; i < task_thirdBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[2])
{
for (int j = 0; j < 4; j++)
task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
if (keyPoint != 0)
{
for (int num = 0; num < 5; num++)
{
task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
(task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
}
}
task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_thirdBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_thirdBbox[pp].clear();
continue;
}
for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_thirdBbox[pp][i].exist)
task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
onet[thread_id].Forward(task_onet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D* score = onet[thread_id].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D* location = onet[thread_id].GetBlobByName("conv6-2");
const ZQ_CNN_Tensor4D* keyPoint = onet[thread_id].GetBlobByName("conv6-3");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
const float* keyPoint_ptr = 0;
if (keyPoint != 0)
keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int keyPoint_sliceStep = 0;
if (keyPoint != 0)
keyPoint_sliceStep = keyPoint->GetSliceStep();
int task_count = 0;
ZQ_CNN_OrderScore order;
for (int i = 0; i < task_thirdBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[2])
{
for (int j = 0; j < 4; j++)
task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
if (keyPoint != 0)
{
for (int num = 0; num < 5; num++)
{
task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
(task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
}
}
task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_thirdBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_thirdBbox[pp].clear();
continue;
}
for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_thirdBbox[pp][i].exist)
task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
}
}
}
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_thirdBbox[i].size();
}
thirdBbox.resize(count);
thirdScore.resize(count);
int id = 0;
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_thirdBbox[i].size(); j++)
{
thirdBbox[id] = task_thirdBbox[i][j];
thirdScore[id].score = task_thirdBbox[i][j].score;
thirdScore[id].oriOrder = id;
id++;
}
}
ZQ_CNN_OrderScore order;
for (int i = 0; i < early_accept_thirdBbox.size(); i++)
{
order.score = early_accept_thirdBbox[i].score;
order.oriOrder = count++;
thirdScore.push_back(order);
thirdBbox.push_back(early_accept_thirdBbox[i]);
}
ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false);
ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min");
double t5 = omp_get_wtime();
if (show_debug_info)
printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count);
if (show_debug_info)
printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4));
return true;
}
// Stage 4 (optional, 5-point Lnet): refines the facial landmarks of the
// stage-3 survivors. Boxes falling outside the image or smaller than
// 0.5*min_size are discarded; the rest are squared, cropped from `input`,
// resized to lnet_size and run through lnet in per-thread batches of at
// most 64. On return fourthBbox holds the kept boxes with updated
// landmarks (ppoint[0..4] = x coords, ppoint[5..9] = y coords).
// Returns true (failed batches are silently skipped).
bool _Lnet_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox)
{
	double t4 = omp_get_wtime();
	fourthBbox.clear();
	std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
	std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
	int l_count = 0;
	// keep only boxes fully inside the image and large enough for Lnet
	for (; it != thirdBbox.end(); it++)
	{
		if ((*it).exist)
		{
			int off_x = it->col1;
			int off_y = it->row1;
			int rect_w = it->col2 - off_x;
			int rect_h = it->row2 - off_y;
			if (off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height || rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
			{
				(*it).exist = false;
				continue;
			}
			else
			{
				l_count++;
				fourthBbox.push_back(*it);
			}
		}
	}
	// landmarks are regressed on square crops: square a copy for cropping,
	// keep the original (refined) boxes in fourthBbox
	std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
	ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
	for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
	{
		int off_x = it->col1;
		int off_y = it->row1;
		int rect_w = it->col2 - off_x;
		int rect_h = it->row2 - off_y;
		src_off_x.push_back(off_x);
		src_off_y.push_back(off_y);
		src_rect_w.push_back(rect_w);
		src_rect_h.push_back(rect_h);
	}
	// split the crops into per-thread batches of at most batch_size
	int batch_size = 64;
	int per_num = ceil((float)l_count / thread_num);
	int need_thread_num = thread_num;
	if (per_num > batch_size)
	{
		need_thread_num = ceil((float)l_count / batch_size);
		per_num = batch_size;
	}
	std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num);
	std::vector<std::vector<int>> task_src_off_x(need_thread_num);
	std::vector<std::vector<int>> task_src_off_y(need_thread_num);
	std::vector<std::vector<int>> task_src_rect_w(need_thread_num);
	std::vector<std::vector<int>> task_src_rect_h(need_thread_num);
	std::vector<std::vector<ZQ_CNN_BBox>> task_fourthBbox(need_thread_num);
	for (int i = 0; i < need_thread_num; i++)
	{
		int st_id = per_num*i;
		int end_id = __min(l_count, per_num*(i + 1));
		int cur_num = end_id - st_id;
		if (cur_num > 0)
		{
			task_src_off_x[i].resize(cur_num);
			task_src_off_y[i].resize(cur_num);
			task_src_rect_w[i].resize(cur_num);
			task_src_rect_h[i].resize(cur_num);
			task_fourthBbox[i].resize(cur_num);
			for (int j = 0; j < cur_num; j++)
			{
				task_src_off_x[i][j] = src_off_x[st_id + j];
				task_src_off_y[i][j] = src_off_y[st_id + j];
				task_src_rect_w[i][j] = src_rect_w[st_id + j];
				task_src_rect_h[i][j] = src_rect_h[st_id + j];
				task_fourthBbox[i][j] = copy_fourthBbox[st_id + j];
			}
		}
	}
	if (thread_num == 1)
	{
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			// BUGFIX: test this batch's crop list, not the outer vector
			// (task_src_off_x.size() is need_thread_num and never 0 here)
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			lnet[0].Forward(task_lnet_images[pp]);
			const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3");
			const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int keyPoint_sliceStep = keyPoint->GetSliceStep();
			// map the normalized landmark offsets back into image coordinates
			for (int i = 0; i < task_fourthBbox[pp].size(); i++)
			{
				for (int num = 0; num < 5; num++)
				{
					task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
						(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
					task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
						(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
				}
			}
		}
	}
	else
	{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			int thread_id = omp_get_thread_num();
			// BUGFIX: same per-batch emptiness test as the serial path
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			lnet[thread_id].Forward(task_lnet_images[pp]);
			const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
			const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int keyPoint_sliceStep = keyPoint->GetSliceStep();
			for (int i = 0; i < task_fourthBbox[pp].size(); i++)
			{
				for (int num = 0; num < 5; num++)
				{
					task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
						(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
					task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
						(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
				}
			}
		}
	}
	// gather refined landmarks back into fourthBbox (box coords were already
	// filled during the filtering pass above)
	int count = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		count += task_fourthBbox[i].size();
	}
	fourthBbox.resize(count);
	int id = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		for (int j = 0; j < task_fourthBbox[i].size(); j++)
		{
			memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10);
			id++;
		}
	}
	double t5 = omp_get_wtime();
	if (show_debug_info)
		printf("run Lnet [%d] times \n", l_count);
	if (show_debug_info)
		printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
	return true;
}
bool _Lnet106_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox)
{
double t4 = omp_get_wtime();
std::vector<ZQ_CNN_BBox> fourthBbox;
std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int l_count = 0;
for (; it != thirdBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
if (off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height || rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
l_count++;
fourthBbox.push_back(*it);
}
}
}
std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
}
int batch_size = 64;
int per_num = ceil((float)l_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)l_count / batch_size);
per_num = batch_size;
}
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num);
std::vector<std::vector<int>> task_src_off_x(need_thread_num);
std::vector<std::vector<int>> task_src_off_y(need_thread_num);
std::vector<std::vector<int>> task_src_rect_w(need_thread_num);
std::vector<std::vector<int>> task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox106>> task_fourthBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(l_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_fourthBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1;
task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2;
task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1;
task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2;
task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area;
task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score;
task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist;
}
}
}
resultBbox.resize(l_count);
for (int i = 0; i < l_count; i++)
{
resultBbox[i].col1 = fourthBbox[i].col1;
resultBbox[i].col2 = fourthBbox[i].col2;
resultBbox[i].row1 = fourthBbox[i].row1;
resultBbox[i].row2 = fourthBbox[i].row2;
resultBbox[i].score = fourthBbox[i].score;
resultBbox[i].exist = fourthBbox[i].exist;
resultBbox[i].area = fourthBbox[i].area;
}
if (thread_num == 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
if (task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
lnet[0].Forward(task_lnet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3");
const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int keypoint_num = keyPoint->GetC() / 2;
int keyPoint_sliceStep = keyPoint->GetSliceStep();
for (int i = 0; i < task_fourthBbox[pp].size(); i++)
{
for (int num = 0; num < keypoint_num; num++)
{
task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
}
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
lnet[thread_id].Forward(task_lnet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int keypoint_num = keyPoint->GetC() / 2;
int keyPoint_sliceStep = keyPoint->GetSliceStep();
for (int i = 0; i < task_fourthBbox[pp].size(); i++)
{
for (int num = 0; num < keypoint_num; num++)
{
task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
}
}
}
}
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_fourthBbox[i].size();
}
resultBbox.resize(count);
int id = 0;
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_fourthBbox[i].size(); j++)
{
memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212);
id++;
}
}
double t5 = omp_get_wtime();
if (show_debug_info)
printf("run Lnet [%d] times \n", l_count);
if (show_debug_info)
printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
return true;
}
};
}
#endif
|
omp_bug5fix.c | /******************************************************************************
* FILE: omp_bug5fix.c
* DESCRIPTION:
* The problem in omp_bug5.c is that the first thread acquires locka and then
* tries to get lockb before releasing locka. Meanwhile, the second thread
* has acquired lockb and then tries to get locka before releasing lockb.
* This solution overcomes the deadlock by using locks correctly.
* AUTHOR: Blaise Barney 01/29/04
* LAST REVISED: 08/15/11
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1000000
#define PI 3.1415926535
#define DELTA .01415926535
int main (int argc, char *argv[])
{
int nthreads, tid, i;
/* NOTE(review): 2 x 4MB automatic arrays may exceed the default stack limit
 * on some systems; the original example keeps them on the stack, so we do too. */
float a[N], b[N];
omp_lock_t locka, lockb;

/* Initialize the locks */
omp_init_lock(&locka);
omp_init_lock(&lockb);

/* Initialize the arrays */
for (i=0; i<N; i++) {
  a[i]=0;
  b[i]=0;
  }

/* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel shared(a, b, nthreads, locka, lockb) private(tid, i)
  {
  /* Obtain thread number and number of threads */
  tid = omp_get_thread_num();
  #pragma omp master
    {
    nthreads = omp_get_num_threads();
    printf("Number of threads = %d\n", nthreads);
    }
  printf("Thread %d starting...\n", tid);
  #pragma omp barrier

  #pragma omp sections nowait
    {
    /* Each section takes one lock at a time and releases it before
       acquiring the other, so the omp_bug5.c deadlock cannot occur. */
    #pragma omp section
      {
      omp_set_lock(&locka);
      printf("Thread %d updating a[]\n",tid);
      for (i=0; i<N; i++)
        a[i] += DELTA * i;
      omp_unset_lock(&locka);
      omp_set_lock(&lockb);
      printf("Thread %d updating b[]\n",tid);
      for (i=0; i<N; i++)
        b[i] += DELTA + i;
      omp_unset_lock(&lockb);
      }
    #pragma omp section
      {
      omp_set_lock(&lockb);
      printf("Thread %d updating b[]\n",tid);
      for (i=0; i<N; i++)
        b[i] += PI * i;
      omp_unset_lock(&lockb);
      omp_set_lock(&locka);
      printf("Thread %d updating a[]\n",tid);
      for (i=0; i<N; i++)
        a[i] += PI + i;
      omp_unset_lock(&locka);
      }
    }  /* end of sections */
  }  /* end of parallel region */

/* BUGFIX: locks were initialized but never destroyed; release their
   resources once all threads have left the parallel region. */
omp_destroy_lock(&locka);
omp_destroy_lock(&lockb);

printf("Sample results: %f %f %f %f\n",a[0],b[0],a[999999],b[999999]);
return 0;  /* BUGFIX: main fell off the end without an explicit return */
}
|
network.h | // == mojo ====================================================================
//
// Copyright (c) gnawice@gnawice.com. All rights reserved.
// See LICENSE in root folder
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files(the "Software"),
// to deal in the Software without restriction, including without
// limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to
// whom the Software is furnished to do so, subject to the following
// conditions :
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
// OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
// THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// ============================================================================
// network.h: The main artificial neural network graph for mojo
// ==================================================================== mojo ==
#pragma once
#include <string>
#include <iostream> // cout
#include <fstream>
#include <sstream>
#include <map>
#include <vector>
#include "layer.h"
#include "solver.h"
#include "activation.h"
#include "cost.h"
// hack for VS2010 to handle c++11 for(:)
#if (_MSC_VER == 1600)
#ifndef __for__
#define __for__ for each
#define __in__ in
#endif
#else
#ifndef __for__
#define __for__ for
#define __in__ :
#endif
#endif
#if defined(MOJO_CV2) || defined(MOJO_CV3)
#ifdef MOJO_CV2
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#pragma comment(lib, "opencv_core249")
#pragma comment(lib, "opencv_highgui249")
#pragma comment(lib, "opencv_imgproc249")
#pragma comment(lib, "opencv_contrib249")
#else //#ifdef MOJO_CV3
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#pragma comment(lib, "opencv_world310")
#endif
#endif
namespace mojo {
#if defined(MOJO_CV2) || defined(MOJO_CV3)
// forward declare these for data augmentation
cv::Mat matrix2cv(const mojo::matrix &m, bool uc8 = false);
mojo::matrix cv2matrix(cv::Mat &m);
mojo::matrix transform(const mojo::matrix in, const int x_center, const int y_center, int out_dim, float theta = 0, float scale = 1.f);
#endif
// portable millisecond sleep, used to idle worker threads during training
#ifdef _WIN32
#include <windows.h>
void mojo_sleep(unsigned milliseconds) { Sleep(milliseconds); }
#else
#include <unistd.h>
// usleep takes microseconds, hence the *1000
void mojo_sleep(unsigned milliseconds) { usleep(milliseconds * 1000); }
#endif
#ifdef MOJO_PROFILE_LAYERS
#ifdef _WIN32
//* used for profiling layers
double PCFreq = 0.0;
__int64 CounterStart = 0;
void StartCounter()
{
LARGE_INTEGER li;
if (!QueryPerformanceFrequency(&li)) return;
PCFreq = double(li.QuadPart) / 1000.0;
QueryPerformanceCounter(&li);
CounterStart = li.QuadPart;
}
double GetCounter()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return double(li.QuadPart - CounterStart) / PCFreq;
}
#else
void StartCounter(){}
double GetCounter(){return 0;}
#endif
#endif
//*/
// Replaces every occurrence of `from` in `str` with `to`, in place.
// Scanning resumes just past each inserted `to`, so a replacement that
// contains `from` (e.g. 'x' -> 'yx') cannot loop forever.
// No-op when `from` is empty.
void replace_str(std::string& str, const std::string& from, const std::string& to) {
	if (from.empty())
		return;
	for (size_t pos = str.find(from); pos != std::string::npos; pos = str.find(from, pos))
	{
		str.replace(pos, from.length(), to);
		pos += to.length();
	}
}
// Returns the energy (half the squared Euclidean distance) between the
// network output `out` and the one-hot/target vector `target`, both of
// length `size`. If `best_index` is non-null it also receives the argmax
// of `out` (ties keep the earliest index).
float match_labels(const float *out, const float *target, const int size, int *best_index = NULL)
{
	float energy = 0;
	int arg_best = 0;
	for (int k = 0; k < size; k++)
	{
		const float diff = out[k] - target[k];
		energy += diff * diff;
		if (out[k] > out[arg_best]) arg_best = k;
	}
	if (best_index != NULL) *best_index = arg_best;
	energy *= 0.5;  // halve: matches the conventional quadratic cost
	return energy;
}
// Returns the index of the largest element in `out` (argmax over `size`
// entries). Ties resolve to the earliest index, matching the strict
// greater-than comparison.
int arg_max(const float *out, const int size)
{
	int best = 0;
	for (int j = 1; j < size; j++)
		if (out[j] > out[best])
			best = j;
	return best;
}
//----------------------------------------------------------------------
// network
// - class that holds all the layers and connection information
// - runs forward prediction
class network
{
int _size; // output size
int _thread_count; // determines number of layer sets (copys of layers)
int _internal_thread_count; // used for speeding up convolutions, etc..
static const int MAIN_LAYER_SET = 0;
// training related stuff
int _batch_size; // determines number of dW sets
float _skip_energy_level;
bool _smart_train;
std::vector <float> _running_E;
double _running_sum_E;
cost_function *_cost_function;
solver *_solver;
static const unsigned char BATCH_RESERVED = 1, BATCH_FREE = 0, BATCH_COMPLETE = 2;
static const int BATCH_FILLED_COMPLETE = -2, BATCH_FILLED_IN_PROCESS = -1;
#ifdef MOJO_OMP
	// OpenMP build: a real lock serializes access to the shared batch_open
	// bookkeeping, and get_thread_num() reports the calling OMP thread.
	omp_lock_t _lock_batch;
	void lock_batch() {omp_set_lock(&_lock_batch);}
	void unlock_batch() {omp_unset_lock(&_lock_batch);}
	void init_lock() {omp_init_lock(&_lock_batch);}
	void destroy_lock() {omp_destroy_lock(&_lock_batch);}
	int get_thread_num() {return omp_get_thread_num();}
#else
	// single-threaded build: locking is a no-op and the thread id is always 0
	void lock_batch() {}
	void unlock_batch() {}
	void init_lock(){}
	void destroy_lock() {}
	int get_thread_num() {return 0;}
#endif
public:
// training progress stuff
int train_correct;
int train_skipped;
int stuck_counter;
int train_updates;
int train_samples;
int epoch_count;
int max_epochs;
float best_estimated_accuracy;
int best_accuracy_count;
float old_estimated_accuracy;
float estimated_accuracy;
// data augmentation stuff
int use_augmentation; // 0=off, 1=mojo, 2=opencv
int augment_x, augment_y;
int augment_h_flip, augment_v_flip;
mojo::pad_type augment_pad;
float augment_theta;
float augment_scale;
// here we have multiple sets of the layers to allow threading and batch processing
// a separate layer set is needed for each independent thread
std::vector< std::vector<base_layer *>> layer_sets;
std::map<std::string, int> layer_map; // name-to-index of layer for layer management
std::vector<std::pair<std::string, std::string>> layer_graph; // pairs of names of layers that are connected
std::vector<matrix *> W; // these are the weights between/connecting layers
// these sets are needed because we need copies for each item in mini-batch
std::vector< std::vector<matrix>> dW_sets; // only for training, will have _batch_size of these
std::vector< std::vector<matrix>> dbias_sets; // only for training, will have _batch_size of these
std::vector< unsigned char > batch_open; // only for training, will have _batch_size of these
	// Constructs a network with one layer set, a batch size of 1, and the
	// solver named by opt_name (NULL selects new_solver's default).
	// Training counters and augmentation settings all start at zero/off.
	network(const char* opt_name=NULL): _thread_count(1), _skip_energy_level(0.f), _batch_size(1)
	{
		_internal_thread_count=1;
		_size=0;
		_solver = new_solver(opt_name);
		_cost_function = NULL;  // set later when training is configured
		//std::vector<base_layer *> layer_set;
		//layer_sets.push_back(layer_set);
		layer_sets.resize(1);          // main layer set; more added for threading
		dW_sets.resize(_batch_size);   // per-mini-batch gradient storage
		dbias_sets.resize(_batch_size);
		batch_open.resize(_batch_size);
		_running_sum_E = 0.;
		train_correct = 0;
		train_samples = 0;
		train_skipped = 0;
		epoch_count = 0;
		max_epochs = 1000;
		train_updates = 0;
		estimated_accuracy = 0;
		old_estimated_accuracy = 0;
		stuck_counter = 0;
		best_estimated_accuracy=0;
		best_accuracy_count=0;
		use_augmentation=0;  // 0=off, 1=mojo, 2=opencv (see member comment)
		augment_x = 0; augment_y = 0; augment_h_flip = 0; augment_v_flip = 0;
		augment_pad =mojo::edge;
		augment_theta=0; augment_scale=0;
		init_lock();  // batch lock (no-op unless MOJO_OMP)
#ifdef USE_AF
		af::setDevice(0);
		af::info();
#endif
	}
	// Frees all layers and weights, then the cost function, solver, and
	// the batch lock.
	~network()
	{
		clear();
		if (_cost_function) delete _cost_function;
		if(_solver) delete _solver;
		destroy_lock();
	}
// call clear if you want to load a different configuration/model
void clear()
{
for(int i=0; i<(int)layer_sets.size(); i++)
{
__for__(auto l __in__ layer_sets[i]) delete l;
layer_sets.clear();
}
layer_sets.clear();
__for__(auto w __in__ W) if(w) delete w;
W.clear();
layer_map.clear();
layer_graph.clear();
}
	// output size of final layer (number of floats forward() returns)
	int out_size() {return _size;}
	// Writes the input layer's width/height/channels into *w/*h/*c.
	// Returns false if no layers have been added yet.
	bool get_input_size(int *w, int *h, int *c)
	{
		if(layer_sets[MAIN_LAYER_SET].size()<1) return false;
		*w=layer_sets[MAIN_LAYER_SET][0]->node.cols;*h=layer_sets[MAIN_LAYER_SET][0]->node.rows;*c=layer_sets[MAIN_LAYER_SET][0]->node.chans;
		return true;
	}
	// sets up number of layer copies to run over multiple threads
	// (grow-only: existing sets are kept, new empty sets are appended and
	// then re-synced with the main set's bias values)
	void build_layer_sets()
	{
		int layer_cnt = (int)layer_sets.size();
		if (layer_cnt<_thread_count) layer_sets.resize(_thread_count);
		// ToDo: add shrink back / else if(layer_cnt>_thread_count)
		sync_layer_sets();
	}
	// number of layer sets available for concurrent forward() calls
	inline int get_thread_count() {return _thread_count;}
	// must call this with max thread count before constructing layers
	// value <1 will result in thread count = # cores (including hyperthreaded)
	void enable_external_threads(int threads = -1)
	{
#ifdef MOJO_OMP
		if (threads < 1) threads = omp_get_num_procs();
		_thread_count = threads;
		// only widen the OMP pool when internal threading doesn't already need more
		if(_internal_thread_count<=_thread_count) omp_set_num_threads(_thread_count);
		omp_set_nested(1);  // allow internal (per-layer) threads inside external ones
#else
		if (threads < 1) _thread_count = 1;
		else _thread_count = threads;
		if (threads > 1) bail("must define MOJO_OMP to used threading");
#endif
		build_layer_sets();  // make sure there is a layer set per thread
	}
	// Enables intra-layer threading (e.g. for convolutions). A value <1
	// selects core count minus one; without MOJO_OMP this is forced to 1.
	void enable_internal_threads(int threads = -1)
	{
#ifdef MOJO_OMP
		if (threads < 1) {threads = omp_get_num_procs(); threads = threads-1;} // one less than core count
		if(threads<1) _internal_thread_count=1;  // single-core machine: clamp to 1
		else _internal_thread_count=threads;
		omp_set_nested(1);
#else
		_internal_thread_count=1;
#endif
	}
	// when using threads, need to get bias data synched between all layer sets,
	// call this after bias update in main layer set to copy the bias to the other sets
	// (copies every bias element from set 0 into sets 1..n-1)
	void sync_layer_sets()
	{
		for(int i=1; i<(int)layer_sets.size();i++)
			for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
				for(int k=0; k<layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
					(layer_sets[i])[j]->bias.x[k]=(layer_sets[MAIN_LAYER_SET])[j]->bias.x[k];
	}
	// used to add some noise to weights: perturbs every weight matrix with
	// zero-mean normal noise scaled by 1/size (e.g. to escape plateaus)
	void heat_weights()
	{
		__for__(auto w __in__ W)
		{
			if (!w) continue;
			matrix noise(w->cols, w->rows, w->chans);
			noise.fill_random_normal(1.f/ noise.size());
			//noise *= *w;
			*w += noise;
		}
	}
	// subtracts the mean from each weight matrix (zero-centers the weights)
	void remove_means()
	{
		__for__(auto w __in__ W)
			if(w) w->remove_mean();
	}
// used to push a layer back in the ORDERED list of layers
// if connect_all() is used, then the order of the push_back is used to connect the layers
// when forward or backward propogation, this order is used for the serialized order of calculations
// Layer_name must be unique.
bool push_back(const char *layer_name, const char *layer_config)
{
if(layer_map[layer_name]) return false; //already exists
base_layer *l=new_layer(layer_name, layer_config);
// set map to index
// make sure there is a 'set' to add layers to
if(layer_sets.size()<1)
{
std::vector<base_layer *> layer_set;
layer_sets.push_back(layer_set);
}
// make sure layer_sets are created
build_layer_sets();
layer_map[layer_name] = (int)layer_sets[MAIN_LAYER_SET].size();
layer_sets[MAIN_LAYER_SET].push_back(l);
// upadate as potential last layer - so it sets the out size
_size=l->fan_size();
// add other copies needed for threading
for(int i=1; i<(int)layer_sets.size();i++) layer_sets[i].push_back(new_layer(layer_name, layer_config));
return true;
}
// connect 2 layers together and initialize weights
// top and bottom concepts are reversed from literature
// my 'top' is the input of a forward() pass and the 'bottom' is the output
// perhaps 'top' traditionally comes from the brain model, but my 'top' comes
// from reading order (information flows top to bottom)
void connect(const char *layer_name_top, const char *layer_name_bottom)
{
size_t i_top=layer_map[layer_name_top];
size_t i_bottom=layer_map[layer_name_bottom];
base_layer *l_top= layer_sets[MAIN_LAYER_SET][i_top];
base_layer *l_bottom= layer_sets[MAIN_LAYER_SET][i_bottom];
int w_i=(int)W.size();
matrix *w = l_bottom->new_connection(*l_top, w_i);
W.push_back(w);
layer_graph.push_back(std::make_pair(layer_name_top,layer_name_bottom));
// need to build connections for other batches/threads
for(int i=1; i<(int)layer_sets.size(); i++)
{
l_top= layer_sets[i][i_top];
l_bottom= layer_sets[i][i_bottom];
delete l_bottom->new_connection(*l_top, w_i);
}
// we need to let solver prepare space for stateful information
if (_solver)
{
if (w)_solver->push_back(w->cols, w->rows, w->chans);
else _solver->push_back(1, 1, 1);
}
int fan_in=l_bottom->fan_size();
int fan_out=l_top->fan_size();
// ToDo: this may be broke when 2 layers connect to one. need to fix (i.e. resnet)
// after all connections, run through and do weights with correct fan count
// initialize weights - ToDo: separate and allow users to configure(?)
if (w && l_bottom->has_weights())
{
if (strcmp(l_bottom->p_act->name, "tanh") == 0)
{
// xavier : for tanh
float weight_base = (float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
// float weight_base = (float)(std::sqrt(.25/( (double)fan_in)));
w->fill_random_uniform(weight_base);
}
else if ((strcmp(l_bottom->p_act->name, "sigmoid") == 0) || (strcmp(l_bottom->p_act->name, "sigmoid") == 0))
{
// xavier : for sigmoid
float weight_base = 4.f*(float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
w->fill_random_uniform(weight_base);
}
else if ((strcmp(l_bottom->p_act->name, "lrelu") == 0) || (strcmp(l_bottom->p_act->name, "relu") == 0)
|| (strcmp(l_bottom->p_act->name, "vlrelu") == 0) || (strcmp(l_bottom->p_act->name, "elu") == 0))
{
// he : for relu
float weight_base = (float)(std::sqrt(2. / (double)fan_in));
w->fill_random_normal(weight_base);
}
else
{
// lecun : orig
float weight_base = (float)(std::sqrt(1. / (double)fan_in));
w->fill_random_uniform(weight_base);
}
}
else if (w) w->fill(0);
}
	// automatically connect all layers in the order they were provided
	// easy way to go, but can't deal with branch/highway/resnet/inception types of architectures
	void connect_all()
	{
		for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size()-1; j++)
			connect(layer_sets[MAIN_LAYER_SET][j]->name.c_str(), layer_sets[MAIN_LAYER_SET][j+1]->name.c_str());
	}
int get_layer_index(const char *name)
{
for (int j = 0; j < (int)layer_sets[MAIN_LAYER_SET].size(); j++)
if (layer_sets[MAIN_LAYER_SET][j]->name.compare(name) == 0)
return j;
return -1;
}
	// get the list of layers used (but not connection information)
	// Returns a human-readable multi-line summary: one "index : name : config"
	// line per layer, followed by the layer connection pairs (3 per line).
	std::string get_configuration()
	{
		std::string str;
		// print all layer configs
		for (int j = 0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++) str+= "  " + std::to_string((long long)j) + " : " +layer_sets[MAIN_LAYER_SET][j]->name +" : " + layer_sets[MAIN_LAYER_SET][j]->get_config_string();
		str += "\n";
		// print layer links, three "top-bottom" pairs per output line
		if (layer_graph.size() <= 0) return str;
		for (int j = 0; j < (int)layer_graph.size(); j++)
		{
			if (j % 3 == 0) str += "  ";
			if((j % 3 == 1)|| (j % 3 == 2)) str += ", ";
			str +=layer_graph[j].first + "-" + layer_graph[j].second;
			if (j % 3 == 2) str += "\n";
		}
		return str;
	}
	// performs forward pass and returns class index
	// do not delete or modify the returned pointer. it is a live pointer to the last layer in the network
	// if calling over multiple threads, provide the thread index since the interal data is not otherwise thread safe
	// (convenience wrapper: forward() followed by arg_max over the output)
	int predict_class(const float *in, int _thread_number = -1)
	{
		const float* out = forward(in, _thread_number);
		return arg_max(out, out_size());
	}
	//----------------------------------------------------------------------------------------------------------
	//  F O R W A R D
	//
	// the main forward pass
	// if calling over multiple threads, provide the thread index since the internal data is not otherwise thread safe
	// train parameter is used to designate the forward pass is used in training (it turns on dropout layers, etc..)
	// Returns a live pointer into the last layer's node buffer (out_size() floats);
	// do not free it, and do not call forward() again on the same thread index
	// before consuming it.
	float* forward(const float *in, int _thread_number=-1, int _train=0)
	{
		// resolve/validate the layer set used by this caller
		if(_thread_number<0) _thread_number=get_thread_num();
		if (_thread_number > _thread_count && _thread_count>0) bail("need to enable threading\n");
		if (_thread_number >= (int)layer_sets.size()) bail("need to enable threading\n");
		//std::cout << get_thread_num() << ",";
		// clear nodes to zero & find input layers
		std::vector<base_layer *> inputs;
		__for__(auto layer __in__ layer_sets[_thread_number])
		{
			if (dynamic_cast<input_layer*> (layer) != NULL)  inputs.push_back(layer);
			layer->set_threading(_internal_thread_count);
			layer->node.fill(0.f);
		}
		// first layer assumed input. copy input to it
		// (multiple input layers consume consecutive slices of `in`)
		const float *in_ptr = in;
		//base_layer * layer = layer_sets[_thread_number][0];
		//memcpy(layer->node.x, in, sizeof(float)*layer->node.size());
		__for__(auto layer __in__ inputs)
		{
			memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size());
			in_ptr += layer->node.size();
		}
		//for (int i = 0; i < layer->node.size(); i++)
		//	layer_sets[_thread_number][0]->node.x[i] = in[i];
		// for all layers, in push_back order (this order serializes the graph)
		__for__(auto layer __in__ layer_sets[_thread_number])
		{
			// add bias and activate these outputs (they should all be summed up from other branches at this point)
			//for(int j=0; j<layer->node.chans; j+=10) for (int i=0; i<layer->node.cols*layer->node.rows; i+=10)   std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
			layer->activate_nodes();
			//for(int j=0; j<layer->node.chans; j++)  for (int i=0; i<layer->node.cols*layer->node.rows; i+=10)   std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
			// send output signal downstream (note in this code 'top' is input layer, 'bottom' is output - bucking tradition
			__for__ (auto &link __in__ layer->forward_linked_layers)
			{
				// instead of having a list of paired connections, just use the shape of W to determine connections
				// this is harder to read, but requires less look-ups
				// the 'link' variable is a std::pair created during the connect() call for the layers
				int connection_index = link.first;
				base_layer *p_bottom = link.second;
				// weight distribution of the signal to layers under it
#ifdef MOJO_PROFILE_LAYERS
				StartCounter();
#endif
				p_bottom->accumulate_signal(*layer, *W[connection_index], _train);
				//if (p_bottom->has_weights())
				//for(int j=0; j<layer->node.chans; j++)
				//int j=0;	for (int i=0; i<layer->node.cols*layer->node.rows; i+=10)   std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
#ifdef MOJO_PROFILE_LAYERS
				std::cout << p_bottom->name << "\t" << GetCounter() << "ms\n";
#endif
			}
		}
		// return pointer to float * result from last layer
		/*	std::cout << "out:";
		for (int i = 0; i < 10; i++)
		{
			std::cout << layer_sets[_thread_number][layer_sets[_thread_number].size() - 1]->node.x[i] <<",";
		}
		std::cout << "\n";
		*/
		return layer_sets[_thread_number][layer_sets[_thread_number].size()-1]->node.x;
	}
//----------------------------------------------------------------------------------------------------------
// W R I T E
//
// write parameters to stream/file
// note that this does not persist intermediate training information that could be needed to 'pickup where you left off'
bool write(std::ofstream& ofs, bool binary = false, bool final = false)
{
// save layers
int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
// int ignore_cnt = 0;
// for (int j = 0; j<(int)layer_sets[0].size(); j++)
// if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL) ignore_cnt++;
ofs<<"mojo01" << std::endl;
ofs<<(int)(layer_cnt)<<std::endl;
for(int j=0; j<(int)layer_sets[0].size(); j++)
ofs << layer_sets[MAIN_LAYER_SET][j]->name << std::endl << layer_sets[MAIN_LAYER_SET][j]->get_config_string();
// if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL)
// save graph
ofs<<(int)layer_graph.size()<<std::endl;
for(int j=0; j<(int)layer_graph.size(); j++)
ofs<<layer_graph[j].first << std::endl << layer_graph[j].second << std::endl;
if(binary)
{
ofs<<(int)1<<std::endl; // flags that this is binary data
// binary version to save space if needed
// save bias info
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
if(layer_sets[MAIN_LAYER_SET][j]->use_bias())
ofs.write((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float));
// save weights
for (int j = 0; j < (int)W.size(); j++)
{
if (W[j])
ofs.write((char*)W[j]->x, W[j]->size()*sizeof(float));
}
}
else
{
ofs<<(int)0<<std::endl;
// save bias info
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
{
if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
{
for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++) ofs << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << " ";
ofs << std::endl;
}
}
// save weights
for(int j=0; j<(int)W.size(); j++)
{
if (W[j])
{
for (int i = 0; i < W[j]->size(); i++) ofs << W[j]->x[i] << " ";
ofs << std::endl;
}
}
}
ofs.flush();
return true;
}
bool write(std::string &filename, bool binary = false, bool final = false) {
std::ofstream temp((const char *)filename.c_str(), std::ios::binary);
return write(temp, binary, final);
}//, std::ofstream::binary);
bool write(char *filename, bool binary = false, bool final = false)
{
std::string str= filename;
return write(str, binary, final);
}
// read network from a file/stream
std::string getcleanline(std::istream& ifs)
{
std::string s;
// The characters in the stream are read one-by-one using a std::streambuf.
// That is faster than reading them one-by-one using the std::istream.
// Code that uses streambuf this way must be guarded by a sentry object.
// The sentry object performs various tasks,
// such as thread synchronization and updating the stream state.
std::istream::sentry se(ifs, true);
std::streambuf* sb = ifs.rdbuf();
for (;;) {
int c = sb->sbumpc();
switch (c) {
case '\n':
return s;
case '\r':
if (sb->sgetc() == '\n') sb->sbumpc();
return s;
case EOF:
// Also handle the case when the last line has no line ending
if (s.empty()) ifs.setstate(std::ios::eofbit);
return s;
default:
s += (char)c;
}
}
}
//----------------------------------------------------------------------------------------------------------
// R E A D
//
	// Deserializes a model previously written by write(), or a "mojo:"-style
	// text description. Rebuilds the layers and graph, then loads biases and
	// weights in either binary or text form depending on the flag line.
	// Returns false only when the stream is initially bad; parse errors are
	// not otherwise detected.
	bool read(std::istream &ifs)
	{
		if(!ifs.good()) return false;
		std::string s;
		s = getcleanline(ifs);
		int layer_count;
		// NOTE(review): 'version' is recorded from the header but not used
		// anywhere else in this function
		int version = 0;
		if (s.compare("mojo01")==0)
		{
			s = getcleanline(ifs);
			layer_count = atoi(s.c_str());
			version = 1;
		}
		else if (s.find("mojo:") == 0)
		{
			// "mojo:" format: one layer definition per line, '#' = comment;
			// no stored weights, so connect and return immediately
			version = -1;
			int cnt = 1;
			while (!ifs.eof())
			{
				s = getcleanline(ifs);
				if (s.empty()) continue;
				if(s[0]=='#') continue;
				push_back(int2str(cnt).c_str(), s.c_str());
				cnt++;
			}
			connect_all();
			// copies batch=0 stuff to other batches
			sync_layer_sets();
			return true;
		}
		else
			layer_count = atoi(s.c_str());
		// read layer def (name line followed by config line, per layer)
		std::string layer_name;
		std::string layer_def;
		for (auto i=0; i<layer_count; i++)
		{
			layer_name = getcleanline(ifs);
			layer_def = getcleanline(ifs);
			push_back(layer_name.c_str(),layer_def.c_str());
		}
		// read graph; a non-positive count means "fully connect in order"
		int graph_count;
		ifs>>graph_count;
		getline(ifs,s); // get endline
		if (graph_count <= 0)
		{
			connect_all();
		}
		else
		{
			std::string layer_name1;
			std::string layer_name2;
			for (auto i=0; i<graph_count; i++)
			{
				layer_name1= getcleanline(ifs);
				layer_name2 = getcleanline(ifs);
				connect(layer_name1.c_str(),layer_name2.c_str());
			}
		}
		int binary;
		s=getcleanline(ifs); // get endline
		binary = atoi(s.c_str());
		// binary version to save space if needed
		if(binary==1)
		{
			// raw float blocks, in the same order write() emitted them
			for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
				if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
				{
					//int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans;
					//int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride;
					//for (int i = 0; i < layer_sets[MAIN_LAYER_SET][j]->bias.size(); i++)
					ifs.read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float));
				}
			for (int j = 0; j < (int)W.size(); j++)
			{
				if (W[j])
				{
					ifs.read((char*)W[j]->x, W[j]->size()*sizeof(float));
				}
			}
		}
		else if(binary==0)// text version
		{
			// read bias
			for(int j=0; j<layer_count; j++)
			{
				if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
				{
					// int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans;
					// int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride;
					// for (int i = 0; i < c; i++)
					for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
					{
						ifs >> layer_sets[MAIN_LAYER_SET][j]->bias.x[k];
						//std::cout << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << ",";
					}
					ifs.ignore();// getline(ifs, s); // get endline
				}
			}
			// read weights
			for (auto j=0; j<(int)W.size(); j++)
			{
				if (W[j])
				{
					for (int i = 0; i < W[j]->size(); i++) ifs >> W[j]->x[i];
					ifs.ignore(); //getline(ifs, s); // get endline
				}
			}
		}
		// copies batch=0 stuff to other batches
		sync_layer_sets();
		return true;
	}
bool read(std::string filename)
{
std::ifstream fs(filename.c_str(),std::ios::binary);
if (fs.is_open())
{
bool ret = read(fs);
fs.close();
return ret;
}
else return false;
}
bool read(const char *filename) { return read(std::string(filename)); }
#ifndef MOJO_NO_TRAINING // this is surely broke by now and will need to be fixed
// ===========================================================================
// training part
// ===========================================================================
// resets the state of all batches to 'free' state
	// resets the state of all batches to 'free' state
	// NOTE(review): memset writes batch_open.size() *bytes*; this assumes the
	// element type of batch_open is byte-sized — confirm if it is ever widened
	void reset_mini_batch() { memset(batch_open.data(), BATCH_FREE, batch_open.size()); }
// sets up number of mini batches (storage for sets of weight deltas)
void set_mini_batch_size(int batch_cnt)
{
if (batch_cnt<1) batch_cnt = 1;
_batch_size = batch_cnt;
dW_sets.resize(_batch_size);
dbias_sets.resize(_batch_size);
batch_open.resize(_batch_size);
reset_mini_batch();
}
	// returns the configured number of mini-batch slots
	int get_mini_batch_size() { return _batch_size; }
// return index of next free batch
// or returns -2 (BATCH_FILLED_COMPLETE) if no free batches - all complete (need a sync call)
// or returns -1 (BATCH_FILLED_IN_PROCESS) if no free batches - some still in progress (must wait to see if one frees)
int get_next_open_batch()
{
int reserved = 0;
int filled = 0;
for (int i = 0; i<batch_open.size(); i++)
{
if (batch_open[i] == BATCH_FREE) return i;
if (batch_open[i] == BATCH_RESERVED) reserved++;
if (batch_open[i] == BATCH_COMPLETE) filled++;
}
if (reserved>0) return BATCH_FILLED_IN_PROCESS; // all filled but wainting for reserves
if (filled == batch_open.size()) return BATCH_FILLED_COMPLETE; // all filled and complete
bail("threading error"); // should not get here unless threading problem
}
//----------------------------------------------------------------------------------------------------------
// s y n c m i n i b a t c h
//
// apply all weights to first set of dW, then apply to model weights
	// Accumulates all completed batches' weight/bias deltas into set [0],
	// applies them to the model weights through the solver, then resets all
	// batch slots and syncs the updated weights to every thread's layer set.
	// Must not be called while any batch is still reserved (in progress).
	void sync_mini_batch()
	{
		// need to ensure no batches in progress (reserved)
		int next = get_next_open_batch();
		if (next == BATCH_FILLED_IN_PROCESS) bail("thread lock");
		int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
		base_layer *layer;
		// sum contributions
		for (int k = layer_cnt - 1; k >= 0; k--)
		{
			layer = layer_sets[MAIN_LAYER_SET][k];
			__for__(auto &link __in__ layer->backward_linked_layers)
			{
				int w_index = (int)link.first;
				// if batch free, then make sure it is zero'd out because we will increment dW set [0]
				if (batch_open[0] == BATCH_FREE) dW_sets[0][w_index].fill(0);
				for (int b = 1; b< _batch_size; b++)
				{
					if (batch_open[b] == BATCH_COMPLETE) dW_sets[0][w_index] += dW_sets[b][w_index];
				}
			}
			// convolution layers keep their bias handling elsewhere — skip
			if (dynamic_cast<convolution_layer*> (layer) != NULL) continue;
			// bias stuff... that needs to be fixed for conv layers perhaps
			if (batch_open[0] == BATCH_FREE) dbias_sets[0][k].fill(0);
			for (int b = 1; b< _batch_size; b++)
			{
				if (batch_open[b] == BATCH_COMPLETE) dbias_sets[0][k] += dbias_sets[b][k];
			}
		}
		// update weights
		// NOTE(review): dW_sets is indexed by *batch* everywhere else, but here
		// dW_sets[MAIN_LAYER_SET] is used — correct only if MAIN_LAYER_SET==0;
		// confirm against the macro's definition
		for (int k = layer_cnt - 1; k >= 0; k--)
		{
			layer = layer_sets[MAIN_LAYER_SET][k];
			__for__(auto &link __in__ layer->backward_linked_layers)
			{
				int w_index = (int)link.first;
				if (dW_sets[MAIN_LAYER_SET][w_index].size() > 0)
					if(W[w_index]) _solver->increment_w(W[w_index], w_index, dW_sets[MAIN_LAYER_SET][w_index]); // -- 10%
			}
			layer->update_bias(dbias_sets[0][k], _solver->learning_rate);
		}
		// prepare to start mini batch over
		reset_mini_batch();
		train_updates++; // could have no updates .. so this is not exact
		sync_layer_sets();
	}
// reserve_next.. is used to reserve a space in the minibatch for the existing training sample
	// Reserves a free mini-batch slot for the current training sample and
	// returns its index. If all slots are complete, triggers sync_mini_batch()
	// first. If slots are still in progress on other threads, spin-waits
	// (1 ms sleeps) until one frees. The batch lock is held only around the
	// state checks/updates, not across the sleep.
	int reserve_next_batch()
	{
		lock_batch();
		int my_batch_index = -3;
		while (my_batch_index < 0)
		{
			my_batch_index = get_next_open_batch();
			if (my_batch_index >= 0) // valid index
			{
				batch_open[my_batch_index] = BATCH_RESERVED;
				unlock_batch();
				return my_batch_index;
			}
			else if (my_batch_index == BATCH_FILLED_COMPLETE) // all index are complete
			{
				sync_mini_batch(); // resets _batch_index to 0
				my_batch_index = get_next_open_batch();
				batch_open[my_batch_index] = BATCH_RESERVED;
				unlock_batch();
				return my_batch_index;
			}
			// need to wait for ones in progress to finish
			unlock_batch();
			mojo_sleep(1);
			lock_batch();
		}
		// not reachable: the loop only exits via the returns above
		return -3;
	}
	// --- training configuration accessors ---------------------------------
	// the learning-rate/solver accessors require a solver to be set first
	float get_learning_rate() {if(!_solver) bail("set solver"); return _solver->learning_rate;}
	void set_learning_rate(float alpha) {if(!_solver) bail("set solver"); _solver->learning_rate=alpha;}
	void reset_solver() {if(!_solver) bail("set solver"); _solver->reset();}
	// smart training skips backprop for samples already below the energy level
	bool get_smart_training() {return _smart_train;}
	void set_smart_training(bool _use_train) { _smart_train = _use_train;}
	float get_smart_train_level() { return _skip_energy_level; }
	void set_smart_train_level(float _level) { _skip_energy_level = _level; }
	// clamp to at least 1 epoch
	void set_max_epochs(int max_e) { if (max_e <= 0) max_e = 1; max_epochs = max_e; }
	int get_epoch() { return epoch_count; }
// goal here is to update the weights W.
// use w_new = w_old - alpha dE/dw
// E = sum: 1/2*||y-target||^2
// note y = f(x*w)
// dE = (target-y)*dy/dw = (target-y)*df/dw = (target-y)*df/dx* dx/dw = (target-y) * df * y_prev
// similarly for cross entropy
// ===========================================================================
// training part
// ===========================================================================
void set_random_augmentation(int translate_x, int translate_y,
int flip_h, int flip_v, mojo::pad_type padding = mojo::edge)
{
use_augmentation = 1;
augment_x = translate_x;
augment_y = translate_y;
augment_h_flip = flip_h;
augment_v_flip = flip_v;
augment_pad = padding;
augment_theta = 0;
augment_scale = 0;
}
void set_random_augmentation(int translate_x, int translate_y,
int flip_h, int flip_v, float rotation_deg, float scale, mojo::pad_type padding = mojo::edge)
{
use_augmentation = 2;
augment_x = translate_x;
augment_y = translate_y;
augment_h_flip = flip_h;
augment_v_flip = flip_v;
augment_pad = padding;
augment_theta = rotation_deg;
augment_scale = scale;
}
// call before starting training for current epoch
	// Prepares training counters for a new epoch and selects the loss
	// function. When smart training detects stalled accuracy it halves the
	// learning rate (floored at 1e-6) and bumps the stuck counter that
	// elvis_left_the_building() uses to stop training.
	void start_epoch(std::string loss_function="mse")
	{
		_cost_function=new_cost_function(loss_function);
		train_correct = 0;
		train_skipped = 0;
		train_updates = 0;
		train_samples = 0;
		if (epoch_count == 0) reset_solver();
		// accuracy not improving .. slow learning
		if(_smart_train && (best_accuracy_count > 4))
		{
			stuck_counter++;
			set_learning_rate((0.5f)*get_learning_rate());
			if (get_learning_rate() < 0.000001f)
			{
				// heat_weights();
				set_learning_rate(0.000001f);
				stuck_counter++;// end of the line.. so speed up end
			}
			best_accuracy_count = 0;
		}
		old_estimated_accuracy = estimated_accuracy;
		estimated_accuracy = 0;
		//_skip_energy_level = 0.05;
		_running_sum_E = 0;
	}
// time to stop?
bool elvis_left_the_building()
{
// 2 stuck x 4 non best accuracy to quit = 8 times no improvement
if ((epoch_count>max_epochs) || (stuck_counter > 3)) return true;
else return false;
}
// call after putting all training samples through this epoch
bool end_epoch()
{
// run leftovers through mini-batch
sync_mini_batch();
epoch_count++;
// estimate accuracy of validation run
estimated_accuracy = 100.f*train_correct / train_samples;
if (train_correct > best_estimated_accuracy)
{
best_estimated_accuracy = (float)train_correct;
best_accuracy_count = 0;
stuck_counter = 0;
}
else best_accuracy_count++;
return elvis_left_the_building();
}
// if smart training was thinking about exiting, calling reset will make it think everything is OK
void reset_smart_training()
{
stuck_counter=0;
best_accuracy_count = 0;
best_estimated_accuracy = 0;
}
//----------------------------------------------------------------------------------------------------------
// u p d a t e _ s m a r t _ t r a i n
//
	// Records one sample's error energy E and correctness, and periodically
	// (every 1000 samples) re-estimates _skip_energy_level — the threshold
	// below which well-learned samples are skipped. The whole body runs under
	// an OpenMP critical section since it mutates shared counters.
	void update_smart_train(const float E, bool correct)
	{
#ifdef MOJO_OMP
	#pragma omp critical
#endif
		{
			train_samples++;
			if (correct) train_correct++;
			if (_smart_train)
			{
				_running_E.push_back(E);
				_running_sum_E += E;
				const int SMART_TRAIN_SAMPLE_SIZE = 1000;
				int s = (int)_running_E.size();
				if (s >= SMART_TRAIN_SAMPLE_SIZE)
				{
					// mean energy over the window drives what fraction of the
					// sorted energies defines the new skip threshold
					_running_sum_E /= (double)s;
					std::sort(_running_E.begin(), _running_E.end());
					float top_fraction = (float)_running_sum_E*10.f; //10.
					const float max_fraction = 0.75f;
					const float min_fraction = 0.075f;// 0.03f;
					if (top_fraction > max_fraction) top_fraction = max_fraction;
					if (top_fraction < min_fraction) top_fraction = min_fraction;
					int index = s - 1 - (int)(top_fraction*(s - 1));
					if (_running_E[index] > 0) _skip_energy_level = _running_E[index];
					_running_sum_E = 0;
					_running_E.clear();
				}
			}
			if (E > 0 && E < _skip_energy_level)
			{
				//std::cout << "E=" << E;
				train_skipped++;
			}
		} // omp critical
	}
// finish back propogation through the hidden layers
	// Finishes back propagation through the hidden layers: the last layer's
	// deltas must already be set by the caller (train_class/train_target).
	// Propagates deltas upstream, accumulates dW/dbias into this sample's
	// mini-batch slot, marks the slot complete, and triggers a weight sync
	// when the whole mini-batch is done.
	void backward_hidden(const int my_batch_index, const int thread_number)
	{
		const int layer_cnt = (int)layer_sets[thread_number].size();
		const int last_layer_index = layer_cnt - 1;
		base_layer *layer;// = layer_sets[thread_number][last_layer_index];
		// update hidden layers
		// start at lower layer and push information up to previous layer
		// handle dropout first
		for (int k = last_layer_index; k >= 0; k--)
		{
			layer = layer_sets[thread_number][k];
			// all the signals should be summed up to this layer by now, so we go through and take the grad of activiation
			int nodes = layer->node.size();
			// already did last layer, so skip it
			if (k< last_layer_index)
				for (int i = 0; i< nodes; i++)
					layer->delta.x[i] *= layer->df(layer->node.x, i, nodes);
			// now pass that signal upstream
			__for__(auto &link __in__ layer->backward_linked_layers) // --- 50% of time this loop
			{
				base_layer *p_top = link.second;
				// note all the delta[connections[i].second] should have been calculated by time we get here
				layer->distribute_delta(*p_top, *W[link.first]);
			}
		}
		// update weights - shouldn't matter the direction we update these
		// we can stay in backwards direction...
		// it was not faster to combine distribute_delta and increment_w into the same loop
		int size_W = (int)W.size();
		dW_sets[my_batch_index].resize(size_W);
		dbias_sets[my_batch_index].resize(layer_cnt);
		for (int k = last_layer_index; k >= 0; k--)
		{
			layer = layer_sets[thread_number][k];
			__for__(auto &link __in__ layer->backward_linked_layers)
			{
				base_layer *p_top = link.second;
				int w_index = (int)link.first;
				//if (dynamic_cast<max_pooling_layer*> (layer) != NULL) continue;
				layer->calculate_dw(*p_top, dW_sets[my_batch_index][w_index]);// --- 20%
				// moved this out to sync_mini_batch();
				//_solver->increment_w( W[w_index],w_index, dW_sets[_batch_index][w_index]); // -- 10%
			}
			// convolution layers skip bias deltas here (handled elsewhere)
			if (dynamic_cast<convolution_layer*> (layer) != NULL) continue;
			dbias_sets[my_batch_index][k] = layer->delta;
		}
		// if all batches finished, update weights
		lock_batch();
		batch_open[my_batch_index] = BATCH_COMPLETE;
		int next_index = get_next_open_batch();
		if (next_index == BATCH_FILLED_COMPLETE) // all complete
			sync_mini_batch(); // resets _batch_index to 0
		unlock_batch();
	}
	// Builds the (possibly augmented) input vector for one training sample.
	// Concatenates all input layers' data; when augmentation is enabled, each
	// multi-row/col input gets the same random rotation/scale/flip/shift.
	// Returns a matrix owning the assembled floats (1 x 1 x total input size).
	mojo::matrix make_input(float *in, const int _thread_number)
	{
		mojo::matrix augmented_input;// = auto_augmentation();
		std::vector<base_layer *> inputs;
		int in_size = 0;
		__for__(auto layer __in__ layer_sets[_thread_number])
		{
			if (dynamic_cast<input_layer*> (layer) != NULL)
			{
				inputs.push_back(layer);
				in_size += layer->node.size();
			}
		}
		if (use_augmentation > 0)
		{
			augmented_input.resize(in_size, 1, 1);
			// one random draw per sample, shared across all input layers
			float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale;
			float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta;
			bool flip_h = ((rand() % 2)*augment_h_flip) ? true: false;
			bool flip_v = ((rand() % 2)*augment_v_flip) ? true: false;
			int shift_x = (rand() % (augment_x * 2 + 1)) - augment_x;
			int shift_y = (rand() % (augment_y * 2 + 1)) - augment_y;
			int offset = 0;
			__for__(auto layer __in__ inputs)
			{
				//memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size());
				//in_ptr += layer->node.size();
				// copy input to matrix type
				mojo::matrix m(layer->node.cols, layer->node.rows, layer->node.chans, in + offset);
				if (m.rows > 1 && m.cols > 1)
				{
#if defined(MOJO_CV2) || defined(MOJO_CV3)
					if ((augment_theta > 0 || augment_scale > 0))
						m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1 + s);
#endif
					// NOTE(review): flip_v calls flip_cols() and flip_h calls
					// flip_rows(), the opposite pairing of train_class() which
					// maps augment_h_flip to flip_cols() — confirm intended
					if (flip_v)m = m.flip_cols();
					if (flip_h) m = m.flip_rows();
					mojo::matrix aug = m.shift(shift_x, shift_y, augment_pad);
					memcpy(augmented_input.x + offset, aug.x, sizeof(float)*aug.size());
					offset += aug.size();
				}
				else
				{
					// 1-D inputs are copied through unmodified
					memcpy(augmented_input.x + offset, m.x, sizeof(float)*m.size());
					offset += m.size();
				}
			}
			// input = augmented_input.x;
		}
		else
		{
			augmented_input.resize(in_size, 1, 1);
			memcpy(augmented_input.x, in, sizeof(float)*in_size);
		}
		return augmented_input;
	}
//----------------------------------------------------------------------------------------------------------
// T R A I N C L A S S
//
// after starting epoch, call this to train against a class label
// label_index must be 0 to out_size()-1
// for thread safety, you must pass in the thread_index if calling from different threads
	// Trains the network on one sample against a class label
	// (label_index must be 0..out_size()-1).
	// Returns true when a backward pass was performed; false when the sample
	// was skipped (smart-train early accept) or no mini-batch slot was free.
	// For thread safety pass _thread_number when calling from several threads.
	bool train_class(float *in, int label_index, int _thread_number = -1)
	{
		if (_solver == NULL) bail("set solver");
		if (_thread_number < 0) _thread_number = get_thread_num();
		if (_thread_number > _thread_count) bail("call allow_threads()");
		const int thread_number = _thread_number;
		// comment-toggle: swapping "/*" and "/*/" switches between make_input()
		// and the inline augmentation block below
		/*
		mojo::matrix augmented_input = make_input(in, thread_number);
		/*/
		float *input = in;
		mojo::matrix augmented_input;
		if (use_augmentation > 0)
		{
			//augment_h_flip = flip_h;
			//augment_v_flip = flip_v;
			// copy input to matrix type
			mojo::matrix m(layer_sets[thread_number][0]->node.cols, layer_sets[thread_number][0]->node.rows, layer_sets[thread_number][0]->node.chans, in);
#if defined(MOJO_CV2) || defined(MOJO_CV3)
			if (augment_theta > 0 || augment_scale > 0)
			{
				float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale;
				float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta;
				m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1+s);
			}
#endif
			if (augment_h_flip)
				if ((rand() % 2) == 0)
					m = m.flip_cols();
			if (augment_v_flip)
				if ((rand() % 2) == 0)
					m = m.flip_rows();
			augmented_input = m.shift((rand() % (augment_x * 2 + 1)) - augment_x, (rand() % (augment_y * 2 + 1)) - augment_y, augment_pad);
			input = augmented_input.x;
		}
		//*/
		// get next free mini_batch slot
		// this is tied to the current state of the model
		int my_batch_index = reserve_next_batch();
		// out of data or an error if index is negative
		if (my_batch_index < 0) return false;
		// run through forward to get nodes activated
		forward(input, thread_number, 1);
		// set all deltas to zero
		__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
		int layer_cnt = (int)layer_sets[thread_number].size();
		// calc delta for last layer to prop back up through network
		// d = (target-out)* grad_activiation(out)
		const int last_layer_index = layer_cnt - 1;
		base_layer *layer = layer_sets[thread_number][last_layer_index];
		const int layer_node_size = layer->node.size();
		const int layer_delta_size = layer->delta.size();
		if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
		float E = 0;
		int max_j_out = 0;
		int max_j_target = label_index;
		// was passing this in, but may as well just create it on the fly
		// a vector mapping the label index to the desired target output node values
		// all -1 except target node 1 (0 instead of -1 for sigmoid/softmax-style outputs)
		std::vector<float> target;
		if((std::string("sigmoid").compare(layer->p_act->name) == 0) || (std::string("softmax").compare(layer->p_act->name) == 0)|| (std::string("brokemax").compare(layer->p_act->name) == 0))
			target = std::vector<float>(layer_node_size, 0);
		else
			target = std::vector<float>(layer_node_size, -1);
		if(label_index>=0 && label_index<layer_node_size) target[label_index] = 1;
		//const float grad_fudge = 1.0f;
		// because of numerator/demoninator cancellations which prevent a divide by zero issue,
		// we need to handle some things special on output layer
		float cost_activation_type = 0;
		if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
			(std::string("cross_entropy").compare(_cost_function->name) == 0))
			cost_activation_type = 1;
		else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
			(std::string("cross_entropy").compare(_cost_function->name) == 0))
			cost_activation_type = 1;
		else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
			(std::string("cross_entropy").compare(_cost_function->name) == 0))
			cost_activation_type = 4;
		for (int j = 0; j < layer_node_size; j++)
		{
			if(cost_activation_type>0)
				layer->delta.x[j] = cost_activation_type*(layer->node.x[j]- target[j]);
			else
				layer->delta.x[j] = _cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size);
			// pick best response
			if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j;
			// for better E maybe just look at 2 highest scores so zeros don't dominate
			float f= mse::cost(layer->node.x[j], target[j]);
			E += f;//mse::cost(layer->node.x[j], target[j]);
		}
		E /= (float)layer_node_size;
		// check for NAN
		if (E != E) bail("network blew up - try lowering learning rate\n");
		// critical section in here, blocking update
		bool match = false;
		if ((max_j_target == max_j_out)) match = true;
		update_smart_train(E, match);
		// smart-train early accept: skip backprop, release the reserved slot
		if (E>0 && E<_skip_energy_level && _smart_train && match)
		{
			lock_batch();
			batch_open[my_batch_index] = BATCH_FREE;
			unlock_batch();
			return false; // return without doing training
		}
		backward_hidden(my_batch_index, thread_number);
		return true;
	}
//----------------------------------------------------------------------------------------------------------
// T R A I N T A R G E T
//
// after starting epoch, call this to train against a target vector
// for thread safety, you must pass in the thread_index if calling from different threads
// if positive=1, goal is to minimize the distance between in and target
bool train_target(float *in, float *target, int positive=1, int _thread_number = -1)
{
if (_solver == NULL) bail("set solver");
if (_thread_number < 0) _thread_number = get_thread_num();
if (_thread_number > _thread_count) bail("need to enable OMP");
const int thread_number = _thread_number;
mojo::matrix augmented_input = make_input(in, thread_number);
float *input = augmented_input.x;
// get next free mini_batch slot
// this is tied to the current state of the model
int my_batch_index = reserve_next_batch();
// out of data or an error if index is negative
if (my_batch_index < 0) return false;
// run through forward to get nodes activated
float *out=forward(in, thread_number, 1);
// set all deltas to zero
__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
int layer_cnt = (int)layer_sets[thread_number].size();
// calc delta for last layer to prop back up through network
// d = (target-out)* grad_activiation(out)
const int last_layer_index = layer_cnt - 1;
base_layer *layer = layer_sets[thread_number][last_layer_index];
const int layer_node_size = layer->node.size();
if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
float E = 0;
int max_j_out = 0;
//int max_j_target = label_index;
// was passing this in, but may as well just create it on the fly
// a vector mapping the label index to the desired target output node values
// all -1 except target node 1
// std::vector<float> target;
//if ((std::string("sigmoid").compare(layer->p_act->name) == 0) || (std::string("softmax").compare(layer->p_act->name) == 0))
// target = std::vector<float>(layer_node_size, 0);
// else
// target = std::vector<float>(layer_node_size, -1);
// if (label_index >= 0 && label_index<layer_node_size) target[label_index] = 1;
const float grad_fudge = 1.0f;
// because of numerator/demoninator cancellations which prevent a divide by zero issue,
// we need to handle some things special on output layer
float cost_activation_type = 0;
if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("brokemax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 4;
for (int j = 0; j < layer_node_size; j++)
{
if (positive) // want to minimize distance
{
if (cost_activation_type > 0)
layer->delta.x[j] = grad_fudge*cost_activation_type*(layer->node.x[j] - target[j]);
else
layer->delta.x[j] = grad_fudge*_cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size);
}
else
{
if (cost_activation_type > 0)
layer->delta.x[j] = grad_fudge*cost_activation_type*(1.f-abs(layer->node.x[j] - target[j]));
else
layer->delta.x[j] = grad_fudge*(1.f-abs(_cost_function->d_cost(layer->node.x[j], target[j])))*layer->df(layer->node.x, j, layer_node_size);
}
// pick best response
if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j;
// for better E maybe just look at 2 highest scores so zeros don't dominate
// L2 distance x 2
E += mse::cost(layer->node.x[j], target[j]);
}
E /= (float)layer_node_size;
// check for NAN
if (E != E) bail("network blew up - try lowering learning rate\n");
// critical section in here, blocking update
bool match = false;
// FIxME if ((max_j_target == max_j_out)) match = true;
if (E < 0.01 && positive) match = true;
else if (E > 0.1 && !positive) match = true;
update_smart_train(E, match);
if (E>0 && E<_skip_energy_level && _smart_train && match)
{
lock_batch();
batch_open[my_batch_index] = BATCH_FREE;
unlock_batch();
return false; // return without doing training
}
backward_hidden(my_batch_index, thread_number);
return true;
}
#else
float get_learning_rate() {return 0;}
void set_learning_rate(float alpha) {}
void train(float *in, float *target){}
void reset() {}
float get_smart_train_level() {return 0;}
void set_smart_train_level(float _level) {}
bool get_smart_train() { return false; }
void set_smart_train(bool _use) {}
#endif
};
}
|
dmg_fmt_plug.c | /*
* DMG cracker patch for JtR. Hacked together during August of 2012
* by Dhiru Kholia <dhiru.kholia at gmail.com>
*
* This software is
* Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* Copyright (c) 2015, magnum
* and is based on "dmg.c" from
*
* hashkill - a hash cracking tool
* Copyright (C) 2010 Milen Rangelov <gat3way@gat3way.eu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* References:
*
* http://lingrok.org/xref/syslinux/utils/isohybrid.c#apple_part_header
* http://www.dubeyko.com/development/FileSystems/HFSPLUS/hexdumps/hfsplus_volume_header.html
*/
/*
* Debug levels:
* 1 show what "test" hits
* 2 dump printables from the decrypted blocks
* 3 dump hex from the decrypted blocks
* 4 dump decrypted blocks to files (will overwrite with no mercy):
* dmg.debug.main main block
* dmg.debug alternate block (if present, this is the start block)
*/
//#define DMG_DEBUG 2
#if FMT_EXTERNS_H
extern struct fmt_main fmt_dmg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_dmg);
#else
#if AC_BUILT
#include "autoconfig.h"
#endif
#include <string.h>
#include <errno.h>
#if !AC_BUILT || HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>
#include <openssl/des.h>
#include "aes.h"
#include "hmac_sha.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#ifdef DMG_DEBUG
#define NEED_OS_FLOCK
#include "os.h"
#endif
#include "arch.h"
#include "jumbo.h"
#include "params.h"
#include "johnswap.h"
#include "common.h"
#include "formats.h"
#include "dmg_common.h"
#include "pbkdf2_hmac_sha1.h"
#ifdef DMG_DEBUG
#include <sys/file.h>
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
extern volatile int bench_running;
#endif
#include "loader.h"
#include "memdbg.h"
#define FORMAT_LABEL "dmg"
#define FORMAT_NAME "Apple DMG"
#define FORMAT_TAG "$dmg$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " 3DES/AES"
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 3DES/AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#undef HTONL
#define HTONL(n) (((((unsigned long)(n) & 0xFF)) << 24) | \
((((unsigned long)(n) & 0xFF00)) << 8) | \
((((unsigned long)(n) & 0xFF0000)) >> 8) | \
((((unsigned long)(n) & 0xFF000000)) >> 24))
#if defined (_OPENMP)
/* Thread-count-derived scale factor; set once in init() and used only to
   size the per-candidate buffers below. */
static int omp_t = 1;
#endif

/* One NUL-terminated candidate password per key slot. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* cracked[i] is set to 1 by crypt_all() when saved_key[i] successfully
   decrypts the current salt; cracked_count is the allocated slot count. */
static int *cracked, cracked_count;

/* Parsed representation of one "$dmg$..." ciphertext (filled by get_salt). */
static struct custom_salt {
	unsigned int saltlen;               /* PBKDF2 salt length, <= 20 */
	unsigned char salt[20];
	unsigned int ivlen;                 /* v2 IV length, <= 32 */
	unsigned char iv[32];
	int headerver;                      /* DMG header version: 1 or 2 */
	unsigned char chunk[8192];          /* v2: encrypted chunk to test-decrypt */
	uint32_t encrypted_keyblob_size;
	uint8_t encrypted_keyblob[128];
	unsigned int len_wrapped_aes_key;   /* v1 fields below */
	unsigned char wrapped_aes_key[296];
	unsigned int len_hmac_sha1_key;
	unsigned char wrapped_hmac_sha1_key[300];
	char scp; /* start chunk present */
	unsigned char zchunk[4096]; /* chunk #0 */
	int cno;                            /* chunk number fed to HMAC to derive IV */
	int data_size;                      /* valid bytes in chunk[], <= 8192 */
	unsigned int iterations;            /* PBKDF2 iteration count (1000 default) */
} *cur_salt;
/* Format init hook: scales key slots for OpenMP and allocates the
   candidate/result arrays sized to max_keys_per_crypt. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	cracked_count = self->params.max_keys_per_crypt;
	cracked = mem_calloc_align(sizeof(*cracked), cracked_count,
	                           MEM_ALIGN_WORD);
	saved_key = mem_calloc_align(sizeof(*saved_key), cracked_count,
	                             MEM_ALIGN_WORD);
}
/* Format teardown hook: releases the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/*
 * Validate one "$dmg$" ciphertext line without committing it to a salt.
 * Returns 1 if the line is structurally well-formed, 0 otherwise.
 * Fields are '*'-separated; each length field is checked against the
 * fixed-size buffers in struct custom_salt before the hex payload that
 * follows it is length-checked.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	char *p;
	int headerver;
	int res, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	/* strtokm mutates its input, so work on a private copy */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN; /* skip over "$dmg$" marker */
	if ((p = strtokm(ctcopy, "*")) == NULL)
		goto err;
	headerver = atoi(p);
	if (headerver == 2) {
		if ((p = strtokm(NULL, "*")) == NULL) /* salt len */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20) /* sizeof(cur_salt->salt) */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* salt */
			goto err;
		if (hexlenl(p, &extra) / 2 != res || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* ivlen */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		/* NOTE(review): re-parses p; res already holds the same value */
		if (atoi(p) > 32) /* sizeof(cur_salt->iv) */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* iv */
			goto err;
		if (hexlenl(p, &extra) / 2 != res || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* encrypted_keyblob_size */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 128) /* sizeof(cur_salt->encrypted_keyblob) */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* encrypted keyblob */
			goto err;
		if (hexlenl(p, &extra) / 2 != res || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* chunk number */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* data_size */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if ((p = strtokm(NULL, "*")) == NULL) /* chunk */
			goto err;
		if (hexlenl(p, &extra) / 2 != res || extra)
			goto err;
		if (res > 8192) /* sizeof(cur_salt->chunk) */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* scp */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		/* FIXME: which values are allowed here? */
		if (res == 1) {
			if ((p = strtokm(NULL, "*")) == NULL) /* zchunk */
				goto err;
			if (strlen(p) != 4096 * 2)
				goto err;
		}
	}
	else if (headerver == 1) {
		if ((p = strtokm(NULL, "*")) == NULL) /* salt len */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* salt */
			goto err;
		if (hexlenl(p, &extra) / 2 != res || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* len_wrapped_aes_key */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 296) /* sizeof(cur_salt->wrapped_aes_key) */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* wrapped_aes_key */
			goto err;
		if (hexlenl(p, &extra) / 2 != res || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* len_hmac_sha1_key */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 300) /* sizeof(cur_salt->wrapped_hmac_sha1_key) */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) /* hmac_sha1_key */
			goto err;
		/* NOTE(review): unlike the fields above, this uses strlen rather
		   than hexlenl, so non-hex characters are not rejected here */
		if (strlen(p) / 2 != res)
			goto err;
	}
	else
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN;
p = strtokm(ctcopy, "*");
cs.headerver = atoi(p);
if (cs.headerver == 2) {
p = strtokm(NULL, "*");
cs.saltlen = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.saltlen; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.ivlen = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.ivlen; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.encrypted_keyblob_size = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.encrypted_keyblob_size; i++)
cs.encrypted_keyblob[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.cno = atoi(p);
p = strtokm(NULL, "*");
cs.data_size = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.data_size; i++)
cs.chunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.scp = atoi(p);
if (cs.scp == 1) {
p = strtokm(NULL, "*");
for (i = 0; i < 4096; i++)
cs.zchunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
}
if ((p = strtokm(NULL, "*")))
cs.iterations = atoi(p);
else
cs.iterations = 1000;
}
else {
p = strtokm(NULL, "*");
cs.saltlen = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.saltlen; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.len_wrapped_aes_key = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.len_wrapped_aes_key; i++)
cs.wrapped_aes_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.len_hmac_sha1_key = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.len_hmac_sha1_key; i++)
cs.wrapped_hmac_sha1_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
if ((p = strtokm(NULL, "*")))
cs.iterations = atoi(p);
else
cs.iterations = 1000;
}
if (cs.iterations == 0)
cs.iterations = 1000;
MEM_FREE(keeptr);
return (void *)&cs;
}
/*
 * Try to unwrap an Apple-style 3DES-EDE wrapped key with the given 24-byte
 * decryption key. Returns 1 if both PKCS-padding checks pass (i.e. the
 * candidate key is plausible), 0 otherwise. The unwrapped key material
 * itself is not returned; only the pass/fail result is used.
 */
static int apple_des3_ede_unwrap_key1(const unsigned char *wrapped_key, const int wrapped_key_len, const unsigned char *decryptKey)
{
	DES_key_schedule ks1, ks2, ks3;
	unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	unsigned char TEMP2[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	/* fixed IV used by the Apple key-wrap scheme */
	unsigned char IV[8] = { 0x4a, 0xdd, 0xa2, 0x2c, 0x79, 0xe8, 0x21, 0x05 };
	int outlen, i;

	/* first pass: 3DES-EDE-CBC decrypt with the fixed IV */
	DES_set_key((DES_cblock*)(decryptKey + 0), &ks1);
	DES_set_key((DES_cblock*)(decryptKey + 8), &ks2);
	DES_set_key((DES_cblock*)(decryptKey + 16), &ks3);
	DES_ede3_cbc_encrypt(wrapped_key, TEMP1, wrapped_key_len, &ks1, &ks2, &ks3,
	                     (DES_cblock*)IV, DES_DECRYPT);
	outlen = check_pkcs_pad(TEMP1, wrapped_key_len, 8);
	if (outlen < 0)
		return 0;

	/* reverse the plaintext byte order, then decrypt again using the
	   first 8 bytes of the reversed buffer as the IV */
	for (i = 0; i < outlen; i++)
		TEMP2[i] = TEMP1[outlen - i - 1];
	outlen -= 8;
	DES_ede3_cbc_encrypt(TEMP2 + 8, TEMP1, outlen, &ks1, &ks2, &ks3,
	                     (DES_cblock*)TEMP2, DES_DECRYPT);
	outlen = check_pkcs_pad(TEMP1, outlen, 8);
	if (outlen < 0)
		return 0;

	return 1;
}
/*
 * Test one candidate password (or one SIMD group of SSE_GROUP_SZ_SHA1
 * candidates when SIMD_COEF_32 is defined) against cur_salt, setting
 * cracked[index+j] = 1 for each hit.
 *
 * headerver 1: derive a key with PBKDF2-SHA1 and accept if both wrapped
 * keys (AES and HMAC-SHA1) unwrap with valid PKCS padding.
 *
 * headerver 2: decrypt the key blob with 3DES, use the recovered AES and
 * HMAC-SHA1 keys to decrypt the stored chunk, and look for plaintext
 * signatures (primarily a run of 8 NUL bytes; additional heuristics are
 * compiled in under DMG_DEBUG).
 */
static void hash_plugin_check_hash(int index)
{
	unsigned char hmacsha1_key_[20];
	unsigned char aes_key_[32];
	int j;

	if (cur_salt->headerver == 1) {
#ifdef SIMD_COEF_32
		unsigned char *derived_key, Derived_key[SSE_GROUP_SZ_SHA1][32];
		int lens[SSE_GROUP_SZ_SHA1], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA1];
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA1];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = (uint32_t*)(Derived_key[i]);
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, 20,
		                cur_salt->iterations, &(x.poutc), 32, 0);
#else
		unsigned char derived_key[32];
		const char *password = saved_key[index];

		pbkdf2_sha1((const unsigned char*)password, strlen(password),
		            cur_salt->salt, 20, cur_salt->iterations, derived_key, 32, 0);
#endif
		j = 0;
#ifdef SIMD_COEF_32
		/* check each lane of the SIMD group */
		for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {
			derived_key = Derived_key[j];
#endif
		if (apple_des3_ede_unwrap_key1(cur_salt->wrapped_aes_key, cur_salt->len_wrapped_aes_key, derived_key) &&
		    apple_des3_ede_unwrap_key1(cur_salt->wrapped_hmac_sha1_key, cur_salt->len_hmac_sha1_key, derived_key)) {
			cracked[index+j] = 1;
		}
#ifdef SIMD_COEF_32
		}
#endif
	} else {
		DES_key_schedule ks1, ks2, ks3;
		unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
		AES_KEY aes_decrypt_key;
		unsigned char outbuf[8192 + 1];
		unsigned char outbuf2[4096 + 1];
		unsigned char iv[20];
#ifdef DMG_DEBUG
		unsigned char *r;
#endif
		const char nulls[8] = { 0 };
#ifdef SIMD_COEF_32
		unsigned char *derived_key, Derived_key[SSE_GROUP_SZ_SHA1][32];
		int lens[SSE_GROUP_SZ_SHA1], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA1];
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA1];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = (uint32_t*)(Derived_key[i]);
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, 20,
		                cur_salt->iterations, &(x.poutc), 32, 0);
#else
		unsigned char derived_key[32];
		const char *password = saved_key[index];

		pbkdf2_sha1((const unsigned char*)password, strlen(password),
		            cur_salt->salt, 20, cur_salt->iterations, derived_key, 32, 0);
#endif
		j = 0;
#ifdef SIMD_COEF_32
		for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) {
			derived_key = Derived_key[j];
#endif
		/* 3DES-decrypt the key blob; AES and HMAC keys are its prefix */
		DES_set_key((DES_cblock*)(derived_key + 0), &ks1);
		DES_set_key((DES_cblock*)(derived_key + 8), &ks2);
		DES_set_key((DES_cblock*)(derived_key + 16), &ks3);
		memcpy(iv, cur_salt->iv, 8);
		DES_ede3_cbc_encrypt(cur_salt->encrypted_keyblob, TEMP1,
		                     cur_salt->encrypted_keyblob_size, &ks1, &ks2, &ks3,
		                     (DES_cblock*)iv, DES_DECRYPT);
		memcpy(aes_key_, TEMP1, 32);
		memcpy(hmacsha1_key_, TEMP1, 20);
		/* per-chunk IV = HMAC-SHA1(hmac_key, chunk_number) */
		hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cur_salt->cno, 4, iv, 20);
		/* 48-byte keyblob implies AES-128, otherwise AES-256 */
		if (cur_salt->encrypted_keyblob_size == 48)
			AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
		else
			AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
		AES_cbc_encrypt(cur_salt->chunk, outbuf, cur_salt->data_size, &aes_decrypt_key, iv, AES_DECRYPT);

		/* 8 consecutive nulls */
		if (memmem(outbuf, cur_salt->data_size, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
			if (!bench_running)
				fprintf(stderr, "NULLS found!\n\n");
#endif
			cracked[index+j] = 1;
		}

/* These tests seem to be obsoleted by the 8xNULL test */
#ifdef DMG_DEBUG
		/* </plist> is a pretty generic signature for Apple */
		if (!cracked[index+j] && memmem(outbuf, cur_salt->data_size, (void*)"</plist>", 8)) {
			if (!bench_running)
				fprintf(stderr, "</plist> found!\n\n");
			cracked[index+j] = 1;
		}
		/* Journalled HFS+ */
		if (!cracked[index+j] && memmem(outbuf, cur_salt->data_size, (void*)"jrnlhfs+", 8)) {
			if (!bench_running)
				fprintf(stderr, "jrnlhfs+ found!\n\n");
			cracked[index+j] = 1;
		}
		/* Handle compressed DMG files, CMIYC 2012 and self-made
		   samples. Is this test obsoleted by the </plist> one? */
		if (!cracked[index+j] && (r = memmem(outbuf, cur_salt->data_size, (void*)"koly", 4))) {
			unsigned int *u32Version = (unsigned int *)(r + 4);

			if (HTONL(*u32Version) == 4) {
				if (!bench_running)
					fprintf(stderr, "koly found!\n\n");
				cracked[index+j] = 1;
			}
		}
		/* Handle VileFault sample images */
		if (!cracked[index+j] && memmem(outbuf, cur_salt->data_size, (void*)"EFI PART", 8)) {
			if (!bench_running)
				fprintf(stderr, "EFI PART found!\n\n");
			cracked[index+j] = 1;
		}
		/* Apple is a good indication but it's short enough to
		   produce false positives */
		if (!cracked[index+j] && memmem(outbuf, cur_salt->data_size, (void*)"Apple", 5)) {
			if (!bench_running)
				fprintf(stderr, "Apple found!\n\n");
			cracked[index+j] = 1;
		}
#endif /* DMG_DEBUG */

		/* Second buffer test. If present, *this* is the very first block of the DMG */
		if (!cracked[index+j] && cur_salt->scp == 1) {
			int cno = 0;

			/* chunk #0 uses chunk number 0 for its IV */
			hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cno, 4, iv, 20);
			if (cur_salt->encrypted_keyblob_size == 48)
				AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
			else
				AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->zchunk, outbuf2, 4096, &aes_decrypt_key, iv, AES_DECRYPT);

			/* 8 consecutive nulls */
			if (memmem(outbuf2, 4096, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
				if (!bench_running)
					fprintf(stderr, "NULLS found in alternate block!\n\n");
#endif
				cracked[index+j] = 1;
			}
#ifdef DMG_DEBUG
			/* This test seem to be obsoleted by the 8xNULL test */
			if (!cracked[index+j] && memmem(outbuf2, 4096, (void*)"Press any key to reboot", 23)) {
				if (!bench_running)
					fprintf(stderr, "MS-DOS UDRW signature found in alternate block!\n\n");
				cracked[index+j] = 1;
			}
#endif /* DMG_DEBUG */
		}

#ifdef DMG_DEBUG
		/* Write block as hex, strings or raw to a file. */
		if (cracked[index+j] && !bench_running) {
#if DMG_DEBUG == 4
			int fd;

			if ((fd = open("dmg.debug.main", O_RDWR | O_CREAT | O_TRUNC, 0660)) == -1)
				perror("open()");
			else {
#if FCNTL_LOCKS
				struct flock lock = { 0 };

				lock.l_type = F_WRLCK;
				while (fcntl(fd, F_SETLKW, &lock)) {
					if (errno != EINTR)
						pexit("fcntl(F_WRLCK)");
				}
#elif OS_FLOCK
				while (flock(fd, LOCK_EX)) {
					if (errno != EINTR)
						pexit("flock(LOCK_EX)");
				}
#endif
				if ((write(fd, outbuf, cur_salt->data_size) == -1))
					perror("write()");
				if (cur_salt->scp == 1)
					if ((write(fd, outbuf2, 4096) == -1))
						perror("write()");
				if (close(fd))
					perror("close");
			}
#endif
#if DMG_DEBUG == 3
			dump_stuff(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_stuff(outbuf2, 4096);
			}
#endif
#if DMG_DEBUG == 2
			dump_text(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_text(outbuf2, 4096);
			}
#endif
		}
#endif /* DMG_DEBUG */
#ifdef SIMD_COEF_32
		}
#endif
	}
	return;
}
/* Install the salt selected by the cracker loop; crypt_all() reads it
   through the file-static cur_salt pointer. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
#ifdef DMG_DEBUG
	//fprintf(stderr, "Blob size is %d bytes\n", cur_salt->data_size);
#endif
}
/* Store candidate password `key` into slot `index`, truncating safely to
   the slot size (PLAINTEXT_LENGTH plus terminating NUL). */
static void dmg_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(saved_key[index]));
}
/* Return the candidate password stored in slot `index`. */
static char *get_key(int index)
{
	return &saved_key[index][0];
}
/*
 * Test all queued candidates against the current salt. Results land in
 * cracked[]; the return value is the number of candidates processed.
 * Iteration advances by MAX_KEYS_PER_CRYPT so each call to
 * hash_plugin_check_hash() handles one full SIMD group.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i;

	/* clear previous results before this batch */
	memset(cracked, 0, sizeof(*cracked) * cracked_count);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (i = 0; i < count; i += MAX_KEYS_PER_CRYPT)
		hash_plugin_check_hash(i);

	return count;
}
/* Return 1 if any candidate in this batch cracked the salt, else 0. */
static int cmp_all(void *binary, int count)
{
	int i = 0;

	while (i < count) {
		if (cracked[i])
			return 1;
		i++;
	}
	return 0;
}
/* Return nonzero if the candidate at `index` cracked the salt. */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* No further verification possible beyond the decryption heuristics, so
   any cmp_one() hit is accepted as exact. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
/* Format descriptor wiring the hooks above into John the Ripper's core.
   FMT_NOT_EXACT is advertised only in DMG_DEBUG builds, where the extra
   plaintext heuristics can produce false positives. */
struct fmt_main fmt_dmg = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef DMG_DEBUG
		FMT_NOT_EXACT |
#endif
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		dmg_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		dmg_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
csr_matop.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matrix operation functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include <assert.h>
#include "seq_mv.h"
#include "csr_matrix.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAdd:
* adds two CSR Matrices A and B and returns a CSR Matrix C;
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
/*
 * Host-side C = A + B for CSR matrices with matching dimensions.
 * Two passes over the rows: a symbolic pass that counts the union of the
 * column patterns of A and B per row (building C_i), then a numeric pass
 * that copies A's entries and folds B's entries in, summing where a
 * column appears in both. marker[] tracks, per column, which row (pass 1)
 * or which output position (pass 2) last touched it.
 * Returns NULL on a dimension mismatch.
 */
hypre_CSRMatrix*
hypre_CSRMatrixAddHost ( hypre_CSRMatrix *A,
                         hypre_CSRMatrix *B )
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_Int        *B_j      = hypre_CSRMatrixJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_Int        *C_j;

   HYPRE_Int         ia, ib, ic, jcol, num_nonzeros;
   HYPRE_Int         pos;
   HYPRE_Int        *marker;

   if (nrows_A != nrows_B || ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   /* symbolic pass: count the union of nonzero columns per row */
   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   num_nonzeros = 0;
   C_i[0] = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         /* count a B column only if A did not already claim it this row */
         if (marker[jcol] != ic)
         {
            marker[jcol] = ic;
            num_nonzeros++;
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixInitialize(C);
   C_j = hypre_CSRMatrixJ(C);
   C_data = hypre_CSRMatrixData(C);

   /* numeric pass: marker[jcol] now stores the output position of jcol
      in the current row of C */
   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         /* marker < C_i[ic] means jcol was not placed in this row yet */
         if (marker[jcol] < C_i[ic])
         {
            C_j[pos] = jcol;
            C_data[pos] = B_data[ib];
            marker[jcol] = pos;
            pos++;
         }
         else
         {
            C_data[marker[jcol]] += B_data[ib];
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);

   return C;
}
/*
 * C = A + B dispatcher: routes to the host or device implementation
 * based on where A's and B's data live. Returns NULL if the host path
 * detects incompatible dimensions.
 */
hypre_CSRMatrix*
hypre_CSRMatrixAdd( hypre_CSRMatrix *A,
                    hypre_CSRMatrix *B)
{
   hypre_CSRMatrix *C = NULL;
   HYPRE_Int exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A),
                                          hypre_CSRMatrixMemoryLocation(B) );

   hypre_assert(exec != HYPRE_EXEC_UNSET);

   if (exec == HYPRE_EXEC_HOST)
   {
      C = hypre_CSRMatrixAddHost(A, B);
   }
#if defined(HYPRE_USING_CUDA)
   else
   {
      C = hypre_CSRMatrixAddDevice(A, B);
   }
#endif

   return C;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixBigAdd:
* adds two CSR Matrices A and B and returns a CSR Matrix C;
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
/*
 * C = A + B for CSR matrices whose column indices are stored as
 * HYPRE_BigInt (BigJ arrays). Same two-pass marker algorithm as
 * hypre_CSRMatrixAddHost: a symbolic pass builds C_i from the union of
 * the row patterns, a numeric pass copies A and folds B in, summing
 * duplicates. Returns NULL on a dimension mismatch.
 */
hypre_CSRMatrix *
hypre_CSRMatrixBigAdd( hypre_CSRMatrix *A,
                       hypre_CSRMatrix *B )
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_BigInt     *A_j      = hypre_CSRMatrixBigJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_BigInt     *B_j      = hypre_CSRMatrixBigJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_BigInt     *C_j;

   HYPRE_Int         ia, ib, ic, num_nonzeros;
   HYPRE_BigInt      jcol;
   HYPRE_Int         pos;
   HYPRE_Int        *marker;

   if (nrows_A != nrows_B || ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* marker is indexed by column, so ncols_A entries suffice */
   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   /* symbolic pass */
   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   num_nonzeros = 0;
   C_i[0] = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         if (marker[jcol] != ic)
         {
            marker[jcol] = ic;
            num_nonzeros++;
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixBigInitialize(C);
   C_j = hypre_CSRMatrixBigJ(C);
   C_data = hypre_CSRMatrixData(C);

   /* numeric pass: marker[jcol] holds jcol's output position in row ic */
   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         if (marker[jcol] < C_i[ic])
         {
            C_j[pos] = jcol;
            C_data[pos] = B_data[ib];
            marker[jcol] = pos;
            pos++;
         }
         else
         {
            C_data[marker[jcol]] += B_data[ib];
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);

   return C;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMultiply
* multiplies two CSR Matrices A and B and returns a CSR Matrix C;
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
/*
 * Host-side C = A * B for CSR matrices (requires ncols_A == nrows_B).
 * Threaded row-wise Gustavson algorithm: rows are block-partitioned
 * across threads; each thread first counts its nonzeros (symbolic pass),
 * the counts are prefix-summed across threads via jj_count[] to offset
 * each thread's C_i entries, thread 0 then creates C, and after a barrier
 * every thread fills its slice of C_j/C_data (numeric pass).
 * When C is square (allsquare), a diagonal entry is reserved in every
 * row even if its value is zero. Returns NULL on a dimension mismatch.
 */
hypre_CSRMatrix*
hypre_CSRMatrixMultiplyHost( hypre_CSRMatrix *A,
                             hypre_CSRMatrix *B)
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_Int        *B_j      = hypre_CSRMatrixJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_Int        *C_j;

   HYPRE_Int         ia, ib, ic, ja, jb, num_nonzeros=0;
   HYPRE_Int         row_start, counter;
   HYPRE_Complex     a_entry, b_entry;
   HYPRE_Int         allsquare = 0;
   HYPRE_Int         max_num_threads;
   HYPRE_Int        *jj_count;

   if (ncols_A != nrows_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   if (nrows_A == ncols_B) allsquare = 1;

   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   max_num_threads = hypre_NumThreads();
   /* per-thread nonzero counts for the cross-thread prefix sum */
   jj_count = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ia, ib, ic, ja, jb, num_nonzeros, row_start, counter, a_entry, b_entry)
#endif
   {
      HYPRE_Int  *B_marker = NULL;
      HYPRE_Int   ns, ne, ii, jj;
      HYPRE_Int   size, rest, num_threads;
      HYPRE_Int   i1;

      /* contiguous row range [ns, ne) for this thread; the first `rest`
         threads take one extra row */
      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();

      size = nrows_A/num_threads;
      rest = nrows_A - size*num_threads;
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }

      /* B_marker[jb] records the last row (pass 1) or output position
         (pass 2) that touched column jb */
      B_marker = hypre_CTAlloc(HYPRE_Int, ncols_B, HYPRE_MEMORY_HOST);
      for (ib = 0; ib < ncols_B; ib++)
      {
         B_marker[ib] = -1;
      }

      /* symbolic pass: thread-local nonzero counts, C_i holds
         thread-local offsets for now */
      num_nonzeros = 0;
      for (ic = ns; ic < ne; ic++)
      {
         C_i[ic] = num_nonzeros;
         if (allsquare)
         {
            B_marker[ic] = ic;
            num_nonzeros++;
         }
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            ja = A_j[ia];
            for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
            {
               jb = B_j[ib];
               if (B_marker[jb] != ic)
               {
                  B_marker[jb] = ic;
                  num_nonzeros++;
               }
            }
         }
      }
      jj_count[ii] = num_nonzeros;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii)
      {
         /* shift this thread's C_i entries by the preceding threads'
            totals */
         jj = jj_count[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            jj += jj_count[i1];
         }
         for (i1 = ns; i1 < ne; i1++)
         {
            C_i[i1] += jj;
         }
      }
      else
      {
         /* thread 0 finalizes the total count and creates C */
         C_i[nrows_A] = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            C_i[nrows_A] += jj_count[i1];
         }
         C = hypre_CSRMatrixCreate(nrows_A, ncols_B, C_i[nrows_A]);
         hypre_CSRMatrixI(C) = C_i;
         hypre_CSRMatrixInitialize(C);
         C_j = hypre_CSRMatrixJ(C);
         C_data = hypre_CSRMatrixData(C);
      }

      /* all threads must see C_j/C_data before the numeric pass */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* numeric pass */
      for (ib = 0; ib < ncols_B; ib++)
      {
         B_marker[ib] = -1;
      }

      counter = C_i[ns];
      for (ic = ns; ic < ne; ic++)
      {
         row_start = C_i[ic];
         if (allsquare)
         {
            /* reserved diagonal slot, initialized to zero */
            B_marker[ic] = counter;
            C_data[counter] = 0;
            C_j[counter] = ic;
            counter++;
         }
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            ja = A_j[ia];
            a_entry = A_data[ia];
            for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
            {
               jb = B_j[ib];
               b_entry = B_data[ib];
               if (B_marker[jb] < row_start)
               {
                  B_marker[jb] = counter;
                  C_j[B_marker[jb]] = jb;
                  C_data[B_marker[jb]] = a_entry*b_entry;
                  counter++;
               }
               else
               {
                  C_data[B_marker[jb]] += a_entry*b_entry;
               }
            }
         }
      }
      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
   } /*end parallel region */

   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);

   return C;
}
/*
 * C = A * B dispatcher: routes to the host or device implementation
 * based on where A's and B's data live. Returns NULL if the host path
 * detects incompatible dimensions.
 */
hypre_CSRMatrix*
hypre_CSRMatrixMultiply( hypre_CSRMatrix *A,
                         hypre_CSRMatrix *B)
{
   hypre_CSRMatrix *C = NULL;
   HYPRE_Int exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A),
                                          hypre_CSRMatrixMemoryLocation(B) );

   hypre_assert(exec != HYPRE_EXEC_UNSET);

   if (exec == HYPRE_EXEC_HOST)
   {
      C = hypre_CSRMatrixMultiplyHost(A, B);
   }
#if defined(HYPRE_USING_CUDA)
   else
   {
      C = hypre_CSRMatrixMultiplyDevice(A, B);
   }
#endif

   return C;
}
/*
 * Build a copy of A with every entry of magnitude <= tol removed.
 * Returns the filtered matrix, or NULL when no entry qualifies for
 * removal (callers treat NULL as "A unchanged").
 */
hypre_CSRMatrix *
hypre_CSRMatrixDeleteZeros( hypre_CSRMatrix *A, HYPRE_Real tol)
{
   HYPRE_Complex   *A_data       = hypre_CSRMatrixData(A);
   HYPRE_Int       *A_i          = hypre_CSRMatrixI(A);
   HYPRE_Int       *A_j          = hypre_CSRMatrixJ(A);
   HYPRE_Int        nrows_A      = hypre_CSRMatrixNumRows(A);
   HYPRE_Int        ncols_A      = hypre_CSRMatrixNumCols(A);
   HYPRE_Int        num_nonzeros = hypre_CSRMatrixNumNonzeros(A);

   hypre_CSRMatrix *B;
   HYPRE_Complex   *B_data;
   HYPRE_Int       *B_i;
   HYPRE_Int       *B_j;

   HYPRE_Int        i, j, zeros;
   HYPRE_Int        src, dst;

   /* first sweep: count droppable entries */
   zeros = 0;
   for (i = 0; i < num_nonzeros; i++)
   {
      if (hypre_cabs(A_data[i]) <= tol)
      {
         zeros++;
      }
   }

   if (!zeros)
   {
      /* nothing to remove */
      return NULL;
   }

   B = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros - zeros);
   hypre_CSRMatrixInitialize(B);
   B_i = hypre_CSRMatrixI(B);
   B_j = hypre_CSRMatrixJ(B);
   B_data = hypre_CSRMatrixData(B);

   /* second sweep: compact surviving entries row by row */
   B_i[0] = 0;
   src = 0;
   dst = 0;
   for (i = 0; i < nrows_A; i++)
   {
      for (j = A_i[i]; j < A_i[i+1]; j++)
      {
         if (hypre_cabs(A_data[j]) > tol)
         {
            B_data[dst] = A_data[src];
            B_j[dst] = A_j[src];
            dst++;
         }
         src++;
      }
      B_i[i+1] = dst;
   }

   return B;
}
/******************************************************************************
*
* Finds transpose of a hypre_CSRMatrix
*
*****************************************************************************/
/**
* idx = idx2*dim1 + idx1
* -> ret = idx1*dim2 + idx2
* = (idx%dim1)*dim2 + idx/dim1
*/
/**
 * Map a flat index into a [dim2][dim1] layout onto the flat index of the
 * transposed [dim1][dim2] layout:
 *   idx = idx2*dim1 + idx1  ->  ret = idx1*dim2 + idx2
 */
static inline HYPRE_Int transpose_idx(HYPRE_Int idx, HYPRE_Int dim1, HYPRE_Int dim2)
{
   HYPRE_Int idx1 = idx % dim1;
   HYPRE_Int idx2 = idx / dim1;

   return idx1 * dim2 + idx2;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixTranspose
*--------------------------------------------------------------------------*/
/*
 * Host-side transpose: *AT = A^T. When `data` is nonzero the numerical
 * values are transposed as well; otherwise only the pattern.
 *
 * Implemented as a parallel counting sort on column indices. `bucket`
 * is laid out as HYPRE_Int[num_threads][num_colsA]; each thread counts
 * its rows' entries per column, a transposed parallel prefix sum turns
 * the counts into placement offsets, and each thread then scatters its
 * entries (iterating backward so the decrementing offsets land entries
 * in forward order). `bucket` itself becomes AT's row-pointer array.
 * Returns 0.
 */
HYPRE_Int
hypre_CSRMatrixTransposeHost(hypre_CSRMatrix  *A,
                             hypre_CSRMatrix **AT,
                             HYPRE_Int         data)
{
   HYPRE_Complex      *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int          *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int          *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int           num_rowsA = hypre_CSRMatrixNumRows(A);
   HYPRE_Int           num_colsA = hypre_CSRMatrixNumCols(A);
   HYPRE_Int           num_nonzerosA = hypre_CSRMatrixNumNonzeros(A);

   HYPRE_Complex      *AT_data;
   /*HYPRE_Int          *AT_i;*/
   HYPRE_Int          *AT_j;
   HYPRE_Int           num_rowsAT;
   HYPRE_Int           num_colsAT;
   HYPRE_Int           num_nonzerosAT;

   HYPRE_Int           max_col;
   HYPRE_Int           i, j;

   /*--------------------------------------------------------------
    * First, ascertain that num_cols and num_nonzeros has been set.
    * If not, set them.
    *--------------------------------------------------------------*/
   if (! num_nonzerosA)
   {
      num_nonzerosA = A_i[num_rowsA];
   }

   if (num_rowsA && num_nonzerosA && ! num_colsA)
   {
      /* infer the column count from the largest column index present */
      max_col = -1;
      for (i = 0; i < num_rowsA; ++i)
      {
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            if (A_j[j] > max_col)
            {
               max_col = A_j[j];
            }
         }
      }
      num_colsA = max_col+1;
   }

   num_rowsAT = num_colsA;
   num_colsAT = num_rowsA;
   num_nonzerosAT = num_nonzerosA;

   *AT = hypre_CSRMatrixCreate(num_rowsAT, num_colsAT, num_nonzerosAT);

   if (0 == num_colsA)
   {
      // JSP: parallel counting sorting breaks down
      // when A has no columns
      hypre_CSRMatrixInitialize(*AT);
      return 0;
   }

   AT_j = hypre_CTAlloc(HYPRE_Int, num_nonzerosAT, HYPRE_MEMORY_SHARED);
   hypre_CSRMatrixJ(*AT) = AT_j;
   if (data)
   {
      AT_data = hypre_CTAlloc(HYPRE_Complex, num_nonzerosAT, HYPRE_MEMORY_SHARED);
      hypre_CSRMatrixData(*AT) = AT_data;
   }

   /*-----------------------------------------------------------------
    * Parallel count sort
    *-----------------------------------------------------------------*/
   HYPRE_Int *bucket = hypre_TAlloc(
      HYPRE_Int, (num_colsA + 1)*hypre_NumThreads(), HYPRE_MEMORY_SHARED);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int num_threads = hypre_NumActiveThreads();
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      /* load-balanced contiguous row range for this thread */
      HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
      HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
      hypre_assert(iBegin <= iEnd);
      hypre_assert(iBegin >= 0 && iBegin <= num_rowsA);
      hypre_assert(iEnd >= 0 && iEnd <= num_rowsA);

      HYPRE_Int i, j;

      memset(bucket + my_thread_num*num_colsA, 0, sizeof(HYPRE_Int)*num_colsA);

      /*-----------------------------------------------------------------
       * Count the number of entries that will go into each bucket
       * bucket is used as HYPRE_Int[num_threads][num_colsA] 2D array
       *-----------------------------------------------------------------*/
      for (j = A_i[iBegin]; j < A_i[iEnd]; ++j) {
         HYPRE_Int idx = A_j[j];
         bucket[my_thread_num*num_colsA + idx]++;
      }

      /*-----------------------------------------------------------------
       * Parallel prefix sum of bucket with length num_colsA * num_threads
       * accessed as if it is transposed as HYPRE_Int[num_colsA][num_threads]
       *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* local (per-thread-stripe) partial sums in transposed order */
      for (i = my_thread_num*num_colsA + 1; i < (my_thread_num + 1)*num_colsA; ++i) {
         HYPRE_Int transpose_i = transpose_idx(i, num_threads, num_colsA);
         HYPRE_Int transpose_i_minus_1 = transpose_idx(i - 1, num_threads, num_colsA);

         bucket[transpose_i] += bucket[transpose_i_minus_1];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#pragma omp master
#endif
      {
         /* single thread links the stripe boundaries together */
         for (i = 1; i < num_threads; ++i) {
            HYPRE_Int j0 = num_colsA*i - 1, j1 = num_colsA*(i + 1) - 1;
            HYPRE_Int transpose_j0 = transpose_idx(j0, num_threads, num_colsA);
            HYPRE_Int transpose_j1 = transpose_idx(j1, num_threads, num_colsA);

            bucket[transpose_j1] += bucket[transpose_j0];
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (my_thread_num > 0) {
         /* propagate the preceding stripe's total into this stripe */
         HYPRE_Int transpose_i0 = transpose_idx(num_colsA*my_thread_num - 1, num_threads, num_colsA);
         HYPRE_Int offset = bucket[transpose_i0];

         for (i = my_thread_num*num_colsA; i < (my_thread_num + 1)*num_colsA - 1; ++i) {
            HYPRE_Int transpose_i = transpose_idx(i, num_threads, num_colsA);

            bucket[transpose_i] += offset;
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /*----------------------------------------------------------------
       * Load the data and column numbers of AT
       *----------------------------------------------------------------*/
      if (data) {
         for (i = iEnd - 1; i >= iBegin; --i) {
            for (j = A_i[i + 1] - 1; j >= A_i[i]; --j) {
               HYPRE_Int idx = A_j[j];
               --bucket[my_thread_num*num_colsA + idx];

               HYPRE_Int offset = bucket[my_thread_num*num_colsA + idx];

               AT_data[offset] = A_data[j];
               AT_j[offset] = i;
            }
         }
      }
      else {
         for (i = iEnd - 1; i >= iBegin; --i) {
            for (j = A_i[i + 1] - 1; j >= A_i[i]; --j) {
               HYPRE_Int idx = A_j[j];
               --bucket[my_thread_num*num_colsA + idx];

               HYPRE_Int offset = bucket[my_thread_num*num_colsA + idx];

               AT_j[offset] = i;
            }
         }
      }
   } /*end parallel region */

   hypre_CSRMatrixI(*AT) = bucket;
   // JSP: bucket is hypre_NumThreads() times longer than
   // the size needed for AT_i, but this should be OK.
   // If the memory size is a concern, we can allocate
   // a new memory for AT_i and copy from bucket.
   hypre_CSRMatrixI(*AT)[num_colsA] = num_nonzerosA;

   return (0);
}
/* Transpose A into *AT, dispatching on A's memory location.
 * data != 0 transposes the numerical values as well; data == 0 transposes
 * only the sparsity pattern.  Returns the error code of the backend routine
 * (0 when no device backend is compiled in and the host path is skipped,
 * matching the original fall-through behavior). */
HYPRE_Int
hypre_CSRMatrixTranspose(hypre_CSRMatrix  *A,
                         hypre_CSRMatrix **AT,
                         HYPRE_Int         data)
{
   HYPRE_Int exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );

   hypre_assert(exec != HYPRE_EXEC_UNSET);

   if (exec == HYPRE_EXEC_HOST)
   {
      /* CPU implementation */
      return hypre_CSRMatrixTransposeHost(A, AT, data);
   }

#if defined(HYPRE_USING_CUDA)
   /* GPU implementation */
   return hypre_CSRMatrixTransposeDevice(A, AT, data);
#else
   /* No device backend: the original left ierr at 0 in this configuration. */
   return 0;
#endif
}
/* Split the external rows Bs_ext (global/"big" column indices) into a diag
 * part (columns in [first_col_diag_B, last_col_diag_B], shifted to local
 * indices) and an offd part (all other columns), and build the merged
 * off-diagonal column map col_map_offd_C.  All phases run inside one OpenMP
 * parallel region separated by barriers; thread 0 does the serial steps
 * (allocation, sort/unique of the column map). */
HYPRE_Int hypre_CSRMatrixSplit(hypre_CSRMatrix *Bs_ext,
                               HYPRE_BigInt first_col_diag_B,
                               HYPRE_BigInt last_col_diag_B,
                               HYPRE_Int num_cols_offd_B,
                               HYPRE_BigInt *col_map_offd_B,
                               HYPRE_Int *num_cols_offd_C_ptr,
                               HYPRE_BigInt **col_map_offd_C_ptr,
                               hypre_CSRMatrix **Bext_diag_ptr,
                               hypre_CSRMatrix **Bext_offd_ptr)
{
   HYPRE_Complex *Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
   HYPRE_Int *Bs_ext_i = hypre_CSRMatrixI(Bs_ext);
   HYPRE_BigInt *Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext);
   HYPRE_Int num_rows_Bext = hypre_CSRMatrixNumRows(Bs_ext);
   HYPRE_Int B_ext_diag_size = 0;
   HYPRE_Int B_ext_offd_size = 0;
   HYPRE_Int *B_ext_diag_i = NULL;
   HYPRE_Int *B_ext_diag_j = NULL;
   HYPRE_Complex *B_ext_diag_data = NULL;
   HYPRE_Int *B_ext_offd_i = NULL;
   HYPRE_Int *B_ext_offd_j = NULL;
   HYPRE_Complex *B_ext_offd_data = NULL;
   HYPRE_Int *my_diag_array;   /* per-thread diag entry counts */
   HYPRE_Int *my_offd_array;   /* per-thread offd entry counts */
   /* temp is allocated by thread 0 but read/written by all threads after the
    * barrier; it holds offd column indices plus col_map_offd_B for the merge. */
   HYPRE_BigInt *temp;
   HYPRE_Int max_num_threads;
   HYPRE_Int cnt = 0;
   hypre_CSRMatrix *Bext_diag = NULL;
   hypre_CSRMatrix *Bext_offd = NULL;
   HYPRE_BigInt *col_map_offd_C = NULL;
   HYPRE_Int num_cols_offd_C = 0;

   B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);
   B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);
   max_num_threads = hypre_NumThreads();
   my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
   my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int size, rest, ii;
      HYPRE_Int ns, ne;            /* this thread's [ns, ne) row range */
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;
      HYPRE_Int num_threads = hypre_NumActiveThreads();

      /* Block-partition the rows; the first `rest` threads get one extra row. */
      size = num_rows_Bext/num_threads;
      rest = num_rows_Bext - size*num_threads;
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }

      /* Phase 1: count diag/offd entries per row; row pointers are filled
       * with thread-local prefix offsets for now. */
      my_diag_size = 0;
      my_offd_size = 0;
      for (i=ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               my_offd_size++;
            }
            else
            {
               my_diag_size++;
            }
         }
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Phase 2: turn thread-local offsets into global row pointers by
       * adding the totals of all lower-numbered threads. */
      if (ii)
      {
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }
         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         /* Thread 0: compute global sizes and allocate the output arrays. */
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_rows_Bext] = B_ext_diag_size;
         B_ext_offd_i[num_rows_Bext] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size || num_cols_offd_B)
         {
            temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Phase 3: scatter entries into the diag/offd arrays; offd keeps its
       * big (global) column index in temp for the map construction below. */
      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               temp[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B;
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
         }
      }

      /* This computes the mappings */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Phase 4 (thread 0 only): merge the offd columns with col_map_offd_B,
       * sort, and deduplicate into col_map_offd_C. */
      if (ii == 0)
      {
         cnt = 0;
         if (B_ext_offd_size || num_cols_offd_B)
         {
            cnt = B_ext_offd_size;
            for (i=0; i < num_cols_offd_B; i++)
            {
               temp[cnt++] = col_map_offd_B[i];
            }
            if (cnt)
            {
               hypre_BigQsort0(temp, 0, cnt-1);
               num_cols_offd_C = 1;
               HYPRE_BigInt value = temp[0];
               for (i = 1; i < cnt; i++)
               {
                  if (temp[i] > value)
                  {
                     value = temp[i];
                     temp[num_cols_offd_C++] = value;
                  }
               }
            }
            if (num_cols_offd_C)
            {
               col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
            }
            for (i = 0; i < num_cols_offd_C; i++)
            {
               col_map_offd_C[i] = temp[i];
            }
            hypre_TFree(temp, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Phase 5: compress offd big column indices to local indices into
       * col_map_offd_C via binary search. */
      for (i = ns; i < ne; i++)
      {
         for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
         {
            B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_ext_offd_j[j], num_cols_offd_C);
         }
      }
   } /* end parallel region */

   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   /* Wrap the raw arrays in CSR matrix objects (ownership transfers). */
   Bext_diag = hypre_CSRMatrixCreate(num_rows_Bext, last_col_diag_B-first_col_diag_B+1, B_ext_diag_size);
   hypre_CSRMatrixMemoryLocation(Bext_diag) = HYPRE_MEMORY_HOST;
   Bext_offd = hypre_CSRMatrixCreate(num_rows_Bext, num_cols_offd_C, B_ext_offd_size);
   hypre_CSRMatrixMemoryLocation(Bext_offd) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(Bext_diag) = B_ext_diag_i;
   hypre_CSRMatrixJ(Bext_diag) = B_ext_diag_j;
   hypre_CSRMatrixData(Bext_diag) = B_ext_diag_data;
   hypre_CSRMatrixI(Bext_offd) = B_ext_offd_i;
   hypre_CSRMatrixJ(Bext_offd) = B_ext_offd_j;
   hypre_CSRMatrixData(Bext_offd) = B_ext_offd_data;

   *col_map_offd_C_ptr = col_map_offd_C;
   *Bext_diag_ptr = Bext_diag;
   *Bext_offd_ptr = Bext_offd;
   *num_cols_offd_C_ptr = num_cols_offd_C;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixReorder:
* Reorders the column and data arrays of a square CSR matrix, such that the
* first entry in each row is the diagonal one.
*--------------------------------------------------------------------------*/
/* Reorder the column/data arrays of a square CSR matrix so the diagonal
 * entry comes first in every (non-empty) row.
 * Returns 0 on success, -1 if the matrix is not square, -2 if a non-empty
 * row has no diagonal entry. */
HYPRE_Int hypre_CSRMatrixReorder(hypre_CSRMatrix *A)
{
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int num_rowsA = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_colsA = hypre_CSRMatrixNumCols(A);
   HYPRE_Int row, k, len, pos;

   /* the matrix should be square */
   if (num_rowsA != num_colsA)
      return -1;

   pos = 0; /* running offset of the current row's first entry */
   for (row = 0; row < num_rowsA; row++)
   {
      len = A_i[row+1] - A_i[row];
      for (k = 0; k < len; k++)
      {
         if (A_j[pos + k] == row)
         {
            /* swap the diagonal entry to the front of the row */
            if (k != 0)
            {
               HYPRE_Int swap_j = A_j[pos];
               A_j[pos] = A_j[pos + k];
               A_j[pos + k] = swap_j;

               HYPRE_Complex swap_d = A_data[pos];
               A_data[pos] = A_data[pos + k];
               A_data[pos + k] = swap_d;
            }
            break;
         }
         /* diagonal element is missing */
         if (k == len - 1)
            return -2;
      }
      pos += len;
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAddPartial:
* adds matrix rows in the CSR matrix B to the CSR Matrix A, where row_nums[i]
* defines to which row of A the i-th row of B is added, and returns a CSR Matrix C;
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
/* C = A with selected rows of B added in: row i of B is added to row
 * row_nums[i] of A.  Two passes over the data: pass 1 counts nonzeros of C
 * using a column `marker`, pass 2 fills C.  Zero entries produced by
 * cancellation are NOT removed (see header comment above). */
hypre_CSRMatrix *
hypre_CSRMatrixAddPartial( hypre_CSRMatrix *A,
                           hypre_CSRMatrix *B,
                           HYPRE_Int *row_nums)
{
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
   HYPRE_Int *B_i = hypre_CSRMatrixI(B);
   HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
   HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B);
   HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix *C;
   HYPRE_Complex *C_data;
   HYPRE_Int *C_i;
   HYPRE_Int *C_j;
   HYPRE_Int ia, ib, ic, jcol, num_nonzeros;
   HYPRE_Int pos, i, i2, j, cnt;
   HYPRE_Int *marker;   /* marker[col] = last row (pass 1) / position (pass 2) that touched col */
   HYPRE_Int *map;      /* permutation of B's rows, sorted by target row in A */
   HYPRE_Int *temp;     /* sorted copy of row_nums */

   if (ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* Sort the B-row -> A-row assignments so B's rows can be consumed in
    * increasing order of their target row of A. */
   map = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   temp = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   for (i=0; i < nrows_B; i++)
   {
      map[i] = i;
      temp[i] = row_nums[i];
   }
   hypre_qsort2i(temp,map,0,nrows_B-1);

   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* Pass 1: count nonzeros per row of C (union of A's columns and the
    * columns of every B row mapped to this row). */
   num_nonzeros = 0;
   C_i[0] = 0;
   cnt = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      /* temp is sorted, so all B rows targeting row ic are consecutive */
      if (cnt < nrows_B && temp[cnt] == ic)
      {
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] != ic)
                  {
                     marker[jcol] = ic;
                     num_nonzeros++;
                  }
               }
            }
            else
               break;
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   /* NOTE(review): C_i is attached before Initialize; presumably Initialize
    * keeps a pre-set row pointer and only allocates J/data -- confirm against
    * hypre_CSRMatrixInitialize. */
   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixInitialize(C);
   C_j = hypre_CSRMatrixJ(C);
   C_data = hypre_CSRMatrixData(C);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* Pass 2: fill C.  marker[col] now records the position of col within the
    * current row of C, so duplicates from B are accumulated in place. */
   cnt = 0;
   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      if (cnt < nrows_B && temp[cnt] == ic)
      {
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] < C_i[ic])
                  {
                     /* new column for this row */
                     C_j[pos] = jcol;
                     C_data[pos] = B_data[ib];
                     marker[jcol] = pos;
                     pos++;
                  }
                  else
                  {
                     /* column already present: accumulate */
                     C_data[marker[jcol]] += B_data[ib];
                  }
               }
            }
            else
               break;
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);
   hypre_TFree(map, HYPRE_MEMORY_HOST);
   hypre_TFree(temp, HYPRE_MEMORY_HOST);
   return C;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixSumElts:
* Returns the sum of all matrix elements.
*--------------------------------------------------------------------------*/
/* Return the sum of all stored entries of A. */
HYPRE_Complex hypre_CSRMatrixSumElts( hypre_CSRMatrix *A )
{
   HYPRE_Complex *values = hypre_CSRMatrixData( A );
   HYPRE_Int nnz = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Int k;
   HYPRE_Complex total = 0;

   for (k = 0; k < nnz; k++)
   {
      total += values[k];
   }
   return total;
}
/* Frobenius norm of A: sqrt of the sum of squared entries.
 * NOTE(review): uses v*v, not |v|^2 -- assumes HYPRE_Complex is a real type
 * in this build (sqrt() would not accept a C complex anyway); confirm for
 * complex-enabled configurations. */
HYPRE_Real hypre_CSRMatrixFnorm( hypre_CSRMatrix *A )
{
   HYPRE_Complex *values = hypre_CSRMatrixData( A );
   HYPRE_Int nnz = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Int nrows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int *row_ptr = hypre_CSRMatrixI(A);
   HYPRE_Complex accum = 0;
   HYPRE_Int k;

   /* sanity check: nnz must agree with the row pointer */
   hypre_assert(nnz == row_ptr[nrows]);

   for (k = 0; k < nnz; k++)
   {
      HYPRE_Complex entry = values[k];
      accum += entry * entry;
   }
   return sqrt(accum);
}
|
matrix_multiply.c | #include<stdio.h>
#include<omp.h>
#include<stdlib.h>
/*
 * Computes a = b * c (a[i][j] = sum over k of b[i][k]*c[k][j]) for matrices
 * stored in rows of 5 ints, with the rows split among OpenMP threads.
 * Demonstrates the absence of a barrier between the multiplication loop and
 * the trailing per-thread message.  Always returns 0.
 *
 * Fixes over the original:
 *  - "#pragma omp reduction(+: sum)" is not a valid standalone directive
 *    (reduction is only a clause on other constructs); removed.  No
 *    reduction is needed because each a[i][j] is written by one thread.
 *  - sum was shared across threads (data race) and was assigned instead of
 *    accumulated in the k loop, so the product was wrong; it is now private
 *    and accumulates the dot product (and no longer reads a's old value,
 *    which was uninitialized in the caller).
 *  - the loops were not work-shared, so every thread redundantly computed
 *    and raced on the whole result; "#pragma omp for nowait" splits the
 *    rows while deliberately keeping no barrier before the final printf.
 */
int multiply_parallel(int m, int n, int p, int a[][5], int b[][5], int c[][5])
{
    int i, j, k, sum;
#pragma omp parallel shared(a,b,c) private(i,j,k,sum)
    {
        printf("Before Barrier construct - thread no = %d\n", omp_get_thread_num());
#pragma omp for nowait
        for (i = 0; i < m; i = i + 1)
        {
            for (j = 0; j < n; j = j + 1)
            {
                sum = 0;
                for (k = 0; k < p; k = k + 1)
                {
                    sum += b[i][k] * c[k][j]; /* accumulate, don't overwrite */
                }
                a[i][j] = sum;
            }
        }
        /* No barrier is placed here (nowait above suppresses the implicit one). */
        printf("After Barrier construct - thread no = %d\n", omp_get_thread_num());
        /* For a task after the matrix multiplication. */
    }
    return 0;
}
/*
 * Same computation as multiply_parallel (a = b * c, rows split among
 * threads) but with an explicit "#pragma omp barrier" before the trailing
 * message, so every thread prints it only after the whole product is done.
 * Always returns 0.
 *
 * Fixes over the original (same as multiply_parallel):
 *  - removed the invalid standalone "#pragma omp reduction" directive;
 *  - sum is now private and accumulates b[i][k]*c[k][j] instead of being
 *    overwritten each iteration (a race on a shared variable before);
 *  - "#pragma omp for nowait" adds real worksharing; the explicit barrier
 *    below then provides the synchronization this demo illustrates.
 */
int multiply_parallel_barrier(int m, int n, int p, int a[][5], int b[][5], int c[][5])
{
    int i, j, k, sum;
#pragma omp parallel shared(a,b,c) private(i,j,k,sum)
    {
        printf("Before Barrier construct - thread no = %d\n", omp_get_thread_num());
#pragma omp for nowait
        for (i = 0; i < m; i = i + 1)
        {
            for (j = 0; j < n; j = j + 1)
            {
                sum = 0;
                for (k = 0; k < p; k = k + 1)
                {
                    sum += b[i][k] * c[k][j]; /* accumulate, don't overwrite */
                }
                a[i][j] = sum;
            }
        }
#pragma omp barrier
        printf("After Barrier construct - thread no = %d\n", omp_get_thread_num());
        /* For a task after the matrix multiplication. */
    }
    return 0;
}
/* Driver: runs the demo twice -- once without a barrier after the
 * multiplication, once with one -- on the same 5x5 operands. */
int main()
{
    int m = 5;
    int n = 5;
    int p = 5;

    int a[5][5] = {{1, 2, 3, 4, 600}, {3, 4, 600, 6, 7}, {1, 2, 3, 4, 600}, {3, 4, 600, 6, 7}, {1, 2, 3, 4, 600}}; // Dimensions m x n
    int b[5][5] = {{600, 6, 7, 8, 9}, {7, 8, 9, 600, 3}, {1, 2, 3, 4, 600}, {3, 4, 600, 6, 7}, {1, 2, 3, 4, 600}}; // Dimensions n x p
    int parallel_mul[5][5]; /* result buffer, filled by the multiply routines */

    /* Parallel multiplication, no barrier before the trailing message. */
    printf("Implementation without barrier :\n");
    multiply_parallel(m, n, p, parallel_mul, a, b);
    printf("\n\n");

    /* Parallel multiplication with a barrier before the trailing message. */
    printf("Implementation with barrier :\n");
    multiply_parallel_barrier(m, n, p, parallel_mul, a, b);

    return 0;
}
DeclOpenMP.h | //===- DeclOpenMP.h - Classes for representing OpenMP directives -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file defines OpenMP nodes for declarative directives.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_DECLOPENMP_H
#define LLVM_CLANG_AST_DECLOPENMP_H
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"
namespace clang {
/// \brief This represents '#pragma omp threadprivate ...' directive.
/// For example, in the following, both 'a' and 'A::b' are threadprivate:
///
/// \code
/// int a;
/// #pragma omp threadprivate(a)
/// struct A {
/// static int b;
/// #pragma omp threadprivate(b)
/// };
/// \endcode
///
/// AST node for one '#pragma omp threadprivate(...)' directive; the listed
/// variable reference expressions are stored as trailing objects.
class OMPThreadPrivateDecl final
    : public Decl,
      private llvm::TrailingObjects<OMPThreadPrivateDecl, Expr *> {
  friend class ASTDeclReader;
  friend TrailingObjects;

  /// Number of variable references stored after this object.
  unsigned NumVars;

  virtual void anchor();

  OMPThreadPrivateDecl(Kind DK, DeclContext *DC, SourceLocation L) :
    Decl(DK, DC, L), NumVars(0) { }

  /// Read-only view of the stored variable references.
  ArrayRef<const Expr *> getVars() const {
    return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars);
  }

  /// Mutable view of the stored variable references.
  MutableArrayRef<Expr *> getVars() {
    return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars);
  }

  /// Copy \p VL into the trailing storage (defined out of line).
  void setVars(ArrayRef<Expr *> VL);

public:
  /// Create a node for the given variable list.
  static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC,
                                      SourceLocation L,
                                      ArrayRef<Expr *> VL);
  /// Create an empty node for deserialization; \p N reserves trailing slots.
  static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C,
                                                  unsigned ID, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
  typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
  typedef llvm::iterator_range<varlist_iterator> varlist_range;
  typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }
  varlist_iterator varlist_begin() { return getVars().begin(); }
  varlist_iterator varlist_end() { return getVars().end(); }
  varlist_const_iterator varlist_begin() const { return getVars().begin(); }
  varlist_const_iterator varlist_end() const { return getVars().end(); }

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};
/// \brief This represents '#pragma omp declare reduction ...' directive.
/// For example, in the following, declared reduction 'foo' for types 'int' and
/// 'float':
///
/// \code
/// #pragma omp declare reduction (foo : int,float : omp_out += omp_in) \
/// initializer (omp_priv = 0)
/// \endcode
///
/// Here 'omp_out += omp_in' is a combiner and 'omp_priv = 0' is an initializer.
class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext {
private:
  friend class ASTDeclReader;
  /// \brief Combiner for declare reduction construct.
  Expr *Combiner;
  /// \brief Initializer for declare reduction construct.
  Expr *Initializer;
  /// \brief Reference to the previous declare reduction construct in the same
  /// scope with the same name. Required for proper templates instantiation if
  /// the declare reduction construct is declared inside compound statement.
  LazyDeclPtr PrevDeclInScope;

  virtual void anchor();

  OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L,
                          DeclarationName Name, QualType Ty,
                          OMPDeclareReductionDecl *PrevDeclInScope)
      : ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), Combiner(nullptr),
        Initializer(nullptr), PrevDeclInScope(PrevDeclInScope) {}

  /// \brief Link this construct to its predecessor in the same scope
  /// (used by the AST reader).
  void setPrevDeclInScope(OMPDeclareReductionDecl *Prev) {
    PrevDeclInScope = Prev;
  }

public:
  /// \brief Create declare reduction node.
  static OMPDeclareReductionDecl *
  Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name,
         QualType T, OMPDeclareReductionDecl *PrevDeclInScope);
  /// \brief Create deserialized declare reduction node.
  static OMPDeclareReductionDecl *CreateDeserialized(ASTContext &C,
                                                     unsigned ID);

  /// \brief Get combiner expression of the declare reduction construct.
  Expr *getCombiner() { return Combiner; }
  const Expr *getCombiner() const { return Combiner; }
  /// \brief Set combiner expression for the declare reduction construct.
  void setCombiner(Expr *E) { Combiner = E; }
  /// \brief Get initializer expression (if specified) of the declare reduction
  /// construct.
  Expr *getInitializer() { return Initializer; }
  const Expr *getInitializer() const { return Initializer; }
  /// \brief Set initializer expression for the declare reduction construct.
  void setInitializer(Expr *E) { Initializer = E; }

  /// \brief Get reference to previous declare reduction construct in the same
  /// scope with the same name.
  OMPDeclareReductionDecl *getPrevDeclInScope();
  const OMPDeclareReductionDecl *getPrevDeclInScope() const;

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPDeclareReduction; }
  static DeclContext *castToDeclContext(const OMPDeclareReductionDecl *D) {
    return static_cast<DeclContext *>(const_cast<OMPDeclareReductionDecl *>(D));
  }
  static OMPDeclareReductionDecl *castFromDeclContext(const DeclContext *DC) {
    return static_cast<OMPDeclareReductionDecl *>(
        const_cast<DeclContext *>(DC));
  }
};
/// Pseudo declaration for capturing expressions. Also is used for capturing of
/// non-static data members in non-static member functions.
///
/// Clang supports capturing of variables only, but OpenMP 4.5 allows to
/// privatize non-static members of current class in non-static member
/// functions. This pseudo-declaration allows properly handle this kind of
/// capture by wrapping captured expression into a variable-like declaration.
/// Variable-like wrapper for a captured expression (see file comment above);
/// always created implicit, with no initializer and no storage class.
class OMPCapturedExprDecl final : public VarDecl {
  friend class ASTDeclReader;
  void anchor() override;

  OMPCapturedExprDecl(ASTContext &C, DeclContext *DC, IdentifierInfo *Id,
                      QualType Type, SourceLocation StartLoc)
      : VarDecl(OMPCapturedExpr, C, DC, StartLoc, SourceLocation(), Id, Type,
                nullptr, SC_None) {
    // Marked implicit: this declaration never appears in the source.
    setImplicit();
  }

public:
  /// Create a capture wrapper for an expression of type \p T named \p Id.
  static OMPCapturedExprDecl *Create(ASTContext &C, DeclContext *DC,
                                     IdentifierInfo *Id, QualType T,
                                     SourceLocation StartLoc);
  /// Create an empty node for deserialization.
  static OMPCapturedExprDecl *CreateDeserialized(ASTContext &C, unsigned ID);

  SourceRange getSourceRange() const override LLVM_READONLY;

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPCapturedExpr; }
};
} // end namespace clang
#endif
|
GB_binop__second_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__second_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_int32)
// A*D function (colscale): GB (_AxD__second_int32)
// D*A function (rowscale): GB (_DxB__second_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_int32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = bij
// C, A, and B types for this generated SECOND_INT32 kernel (all int32_t)
#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (empty: SECOND ignores its first operand, so A's values are never read)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = second(x,y) = y
#define GB_BINOP(z,x,y,i,j) \
    z = y ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    1

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SECOND || GxB_NO_INT32 || GxB_NO_SECOND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: this kernel only applies to the listed
// accumulator ops, which do not include SECOND.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// shared template, specialized via the GB_* macros above (SECOND: cij = bij).
GrB_Info GB (_Cdense_ewise3_noaccum__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this kernel was compiled out (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; work is pre-sliced into
// B_ek_slicing for B_ntasks tasks on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar passed as an untyped pointer.
GrB_Info GB (_Cdense_accumb__second_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above already returned (kept by the generator)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D where D is diagonal (column scaling of A).
GrB_Info GB (_AxD__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B where D is diagonal (row scaling of B).
GrB_Info GB (_DxB__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B with optional mask M (regular or complemented); tasks are
// pre-sliced in TaskList.  Workspaces are declared here and freed via
// GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__second_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (method 08) when C is sparse/hypersparse.
GrB_Info GB (_AemultB_08__second_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (method 02) when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for SECOND, so only the non-flipped branch is compiled.
GrB_Info GB (_AemultB_02__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B (method 04): M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__second_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (masked or not) where C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__second_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: Cx = second(x, Bx) is just a copy of Bx,
// so no dedicated bind1st kernel is emitted for SECOND.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = bij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: Cx = second(Ax, y) broadcasts the scalar y,
// so no dedicated bind2nd kernel is emitted for SECOND.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ; ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Dead code: transpose-with-bind1st kernel not generated for this operator.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Dead code: transpose-with-bind2nd kernel not generated for this operator.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
#endif
|
pimonte_VSL_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
//VSL Variables
#include "mkl_vsl.h"
#define BRNG VSL_BRNG_MCG31
#define METHOD 0
#define BLOCK_SIZE 500
/* Monte-Carlo estimate of pi using MKL VSL uniform RNG, parallelized with
 * OpenMP.  Each thread owns a private VSL stream and a private block buffer;
 * hits inside the quarter circle are combined with a reduction.
 *
 * Fixes vs. the original:
 *  - timing used clock(), which (a) is CPU time summed over all threads and
 *    (b) was divided by 1000.0 instead of CLOCKS_PER_SEC; omp_get_wtime()
 *    reports true elapsed seconds.
 *  - every thread seeded its stream with (int)clock(), so all threads got
 *    (nearly) identical, correlated random sequences; the seed now includes
 *    the thread number.  */
int main(){
    unsigned int iter=200000000;        // total number of sample points
    int i,j;
    double x, y;
    double dUnderCurve=0.0;             // count of points inside the quarter circle
    double pi=0.0;
    VSLStreamStatePtr stream;           // one private stream per thread
    double end_time,start_time;
    start_time=omp_get_wtime();         // wall-clock seconds
    #pragma omp parallel private(stream,x,y,i) reduction(+:dUnderCurve)
    {
        double r[BLOCK_SIZE*2];         // private (x,y) block for this thread
        // distinct seed per thread -> independent (uncorrelated) substreams
        vslNewStream( &stream, BRNG, (int)time(NULL) + omp_get_thread_num() );
        #pragma omp for
        for(j=0;j<iter/BLOCK_SIZE;j++) {    // j is the omp-for index: implicitly private
            vdRngUniform( METHOD, stream, BLOCK_SIZE*2, r, 0.0, 1.0 );
            // first BLOCK_SIZE entries are X, next BLOCK_SIZE are Y
            for (i=0;i<BLOCK_SIZE;i++) {
                x=r[i];                 // X coordinate
                y=r[i+BLOCK_SIZE];      // Y coordinate
                if (x*x + y*y <= 1.0) { // is distance from origin under curve?
                    dUnderCurve++;
                }
            }
        }
        vslDeleteStream( &stream );
    }
    pi = dUnderCurve / (double) iter * 4 ;  // quarter-circle area ratio = pi/4
    end_time=omp_get_wtime();
    printf ("pi = %10.9f\n", pi);
    printf ("Seconds = %10.9f\n",(double)(end_time-start_time));
    return 0;
}
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include "box.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define NUMCHARS 37
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/* Read every line of `filename` into a newly allocated list of strings.
 * Exits via file_error() if the file cannot be opened. */
list *get_paths(char *filename)
{
    FILE *fp = fopen(filename, "r");
    if (!fp) file_error(filename);
    list *lines = make_list();
    for (char *path = fgetl(fp); path; path = fgetl(fp)) {
        list_insert(lines, path);
    }
    fclose(fp);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = random_gen()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/* Pick n paths arranged as `mini_batch` interleaved "time lines" (for video
 * training): each time line starts at a random index and advances by a
 * random step `speed` in [1, augment_speed] each round, wrapping modulo m.
 * Serialized with the module mutex.  Returns a freshly allocated array of
 * borrowed path pointers (caller frees the array only). */
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed)
{
    int speed = rand_int(1, augment_speed);
    if (speed < 1) speed = 1;   // defensive: always advance by at least 1
    char** sequentia_paths = (char**)xcalloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);  // serialize with other data-loader threads
    //printf("n = %d, mini_batch = %d \n", n, mini_batch);
    unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int));
    for (i = 0; i < mini_batch; ++i) {
        // random starting frame for each time line
        start_time_indexes[i] = random_gen() % m;
        //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]);
    }
    for (i = 0; i < n; ++i) {
        do {
            int time_line_index = i % mini_batch;
            unsigned int index = start_time_indexes[time_line_index] % m;  // wrap around data set
            start_time_indexes[time_line_index] += speed;
            //int index = random_gen() % m;
            sequentia_paths[i] = paths[index];
            //if(i == 0) printf("%s\n", paths[index]);
            //printf(" index = %u - grp: %s \n", index, paths[index]);
            if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]);
        } while (strlen(sequentia_paths[i]) == 0);  // retry only on an empty path
    }
    free(start_time_indexes);
    pthread_mutex_unlock(&mutex);
    return sequentia_paths;
}
/* Sample n paths uniformly at random (with replacement) from paths[0..m-1].
 * Serialized with the module mutex.  Returns a freshly allocated array of
 * borrowed path pointers (caller frees the array only). */
char **get_random_paths(char **paths, int n, int m)
{
    char **chosen = (char**)xcalloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    //printf("n = %d \n", n);
    for (i = 0; i < n; ++i) {
        do {
            chosen[i] = paths[random_gen() % m];
            if (strlen(chosen[i]) <= 4) printf(" Very small path to the image: %s \n", chosen[i]);
        } while (strlen(chosen[i]) == 0);   // reject empty lines from the list file
    }
    pthread_mutex_unlock(&mutex);
    return chosen;
}
/* Apply the find->replace substitution to every path and return a new array
 * of newly allocated strings (caller owns array and strings). */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **out = (char**)xcalloc(n, sizeof(char*));
    int idx;
    for (idx = 0; idx < n; ++idx) {
        char buffer[4096];
        find_replace(paths[idx], find, replace, buffer);
        out[idx] = copy_string(buffer);
    }
    return out;
}
/* Load n images at w x h, convert each to grayscale, and pack the raw float
 * buffers as the rows of a matrix (ownership of pixel data moves to X). */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.vals = (float**)xcalloc(X.rows, sizeof(float*));
    X.cols = 0;
    int idx;
    for (idx = 0; idx < n; ++idx) {
        image color = load_image(paths[idx], w, h, 3);
        image gray = grayscale_image(color);
        free_image(color);              // only the gray copy is kept
        X.vals[idx] = gray.data;
        X.cols = gray.h*gray.w*gray.c;
    }
    return X;
}
/* Load n color images at w x h and pack the raw float buffers as the rows
 * of a matrix (ownership of pixel data moves to X). */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.vals = (float**)xcalloc(X.rows, sizeof(float*));
    X.cols = 0;
    int idx;
    for (idx = 0; idx < n; ++idx) {
        image im = load_image_color(paths[idx], w, h);
        X.vals[idx] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}
/* Load n images and apply classification-style augmentation to each:
 * random crop/rotate/scale, optional horizontal flip, HSV distortion, then
 * resize to w x h.  Rows of the returned matrix are the raw float pixel
 * buffers (ownership transferred to the matrix). */
matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int dontuse_opencv)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)xcalloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        int size = w > h ? w : h;   // augment against the larger target side
        image im;
        if(dontuse_opencv) im = load_image_stb_resize(paths[i], 0, 0, 3);
        else im = load_image_color(paths[i], 0, 0);
        image crop = random_augment_image(im, angle, aspect, min, max, size);
        int flip = use_flip ? random_gen() % 2 : 0;   // 50% chance when enabled
        if (flip)
            flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);
        image sized = resize_image(crop, w, h);
        //show_image(im, "orig");
        //show_image(sized, "sized");
        //show_image(sized, paths[i]);
        //wait_until_press_key_cv();
        //printf("w = %d, h = %d \n", sized.w, sized.h);
        free_image(im);
        free_image(crop);
        X.vals[i] = sized.data;          // matrix takes ownership of the pixels
        X.cols = sized.h*sized.w*sized.c;
    }
    return X;
}
extern int check_mistakes;
/* Parse a YOLO label file: one "<id> <x> <y> <w> <h>" line per object, with
 * x,y,w,h relative to the image size.  On success, *n is the box count and
 * a heap array is returned (caller frees).  A missing file is tolerated:
 * the path is appended to bad.list, *n = 0, and an empty allocation is
 * returned.
 *
 * Fix: the fopen("bad.list","a") result was used unchecked; if that file
 * could not be opened, fwrite(NULL, ...) crashed the loader. */
box_label *read_boxes(char *filename, int *n)
{
    box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label));
    FILE *file = fopen(filename, "r");
    if (!file) {
        printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename);
        //file_error(filename);
        // best-effort logging: skip it if bad.list itself can't be opened
        FILE* fw = fopen("bad.list", "a");
        if (fw) {
            fwrite(filename, sizeof(char), strlen(filename), fw);
            char *new_line = "\n";
            fwrite(new_line, sizeof(char), strlen(new_line), fw);
            fclose(fw);
        }
        if (check_mistakes) {
            printf("\n Error in read_boxes() \n");
            getchar();
        }
        *n = 0;
        return boxes;
    }
    float x, y, h, w;
    int id;
    int count = 0;
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label));
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        // precompute box edges; correct_boxes() works on these
        boxes[count].left = x - w/2;
        boxes[count].right = x + w/2;
        boxes[count].top = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}
/* Shuffle the boxes in place by swapping each element with a randomly
 * chosen index (random-swap shuffle, as in the original). */
void randomize_boxes(box_label *b, int n)
{
    int pos;
    for (pos = 0; pos < n; ++pos) {
        int other = random_gen()%n;
        box_label tmp = b[pos];
        b[pos] = b[other];
        b[other] = tmp;
    }
}
/* Remap relative box coordinates through the crop described by
 * (dx, dy, sx, sy) and an optional horizontal flip, clamping to [0,1].
 * Boxes that are degenerate (x==y==0) or entirely outside the crop are
 * marked with the sentinel value 999999 so downstream code can skip them. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        // (0,0) marks an empty/placeholder annotation
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        // box lies completely outside the [0,1] image region
        if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 ||
            (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1)
        {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        // map edges into crop coordinates
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;
        if(flip){
            // mirror horizontally: left/right swap around x = 0.5
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }
        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);
        // rebuild center/size from the clamped edges
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);
        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}
/* Fill `truth` for the SWAG format: up to 30 boxes, each encoded as
 * (x, y, w, h) followed by a one-hot class vector of length `classes`.
 * Boxes are shuffled and remapped through the crop/flip first. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);   // derive label file path from image path
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count && i < 30; ++i) {    // hard cap of 30 boxes per image
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        // NOTE(review): threshold is < 0.0 here, but fill_truth_region uses
        // < .001 — looks intentional (sentinel boxes are 999999), but confirm.
        if (w < .0 || h < .0) continue;
        int index = (4+classes) * i;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
        if (id < classes) truth[index+id] = 1;  // one-hot class
    }
    free(boxes);
}
/* Fill `truth` for the region/grid format: the image is divided into a
 * num_boxes x num_boxes grid; each cell holds (objectness, one-hot class,
 * x-offset, y-offset, w, h).  Only the first box landing in a cell is kept. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        if (w < .001 || h < .001) continue;   // drop sub-pixel boxes
        // grid cell containing the box center
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);
        // center offset within that cell, in [0,1)
        x = x*num_boxes - col;
        y = y*num_boxes - row;
        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;           // cell already occupied
        truth[index++] = 1;                   // objectness
        if (id < classes) truth[index+id] = 1;
        index += classes;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}
/* Fill `truth` (num_boxes rows of 5 floats: x, y, w, h, class id) from the
 * label file corresponding to `path`, after applying the same crop
 * (dx, dy, sx, sy) and flip as the image.  Invalid annotations are logged
 * (and appended to bad_label.list via the shell) and skipped.
 * Returns the smallest kept box side in network pixels (0 if none kept).
 *
 * Fix: the shell command was built with sprintf into a 256-byte buffer
 * while labelpath can be up to 4096 bytes — a stack buffer overflow.  The
 * buffer is now large enough and written with snprintf. */
int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy,
    int net_w, int net_h)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    int i;
    box_label *boxes = read_boxes(labelpath, &count);
    int min_w_h = 0;
    float lowest_w = 1.F / net_w;   // one network pixel, in relative units
    float lowest_h = 1.F / net_h;
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if (count > num_boxes) count = num_boxes;
    float x, y, w, h;
    int id;
    int sub = 0;                    // number of boxes skipped so far
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        // not detect small objects
        //if ((w < 0.001F || h < 0.001F)) continue;
        // if truth (box for object) is smaller than 1x1 pix
        // must hold the echo command plus labelpath (up to 4096 bytes)
        char buff[4096 + 512];
        // NOTE(review): labelpath is interpolated into a shell command; a
        // path containing shell metacharacters would be executed.  Paths
        // come from the training list — treat as untrusted if user-supplied.
        if (id >= classes) {
            printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: class_id = %d. But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
            system(buff);
            if (check_mistakes) getchar();
            ++sub;
            continue;
        }
        if ((w < lowest_w || h < lowest_h)) {
            //snprintf(buff, sizeof(buff), "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
            //system(buff);
            ++sub;
            continue;
        }
        if (x == 999999 || y == 999999) {   // sentinel set by correct_boxes()
            printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (x <= 0 || x > 1 || y <= 0 || y > 1) {
            printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (w > 1) {
            printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
            system(buff);
            w = 1;                  // clamp instead of skipping
            if (check_mistakes) getchar();
        }
        if (h > 1) {
            printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
            system(buff);
            h = 1;
            if (check_mistakes) getchar();
        }
        // nudge exact-zero centers inside the image
        if (x == 0) x += lowest_w;
        if (y == 0) y += lowest_h;
        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
        // track the smallest box side, in network pixels
        if (min_w_h == 0) min_w_h = w*net_w;
        if (min_w_h > w*net_w) min_w_h = w*net_w;
        if (min_w_h > h*net_h) min_w_h = h*net_h;
    }
    free(boxes);
    return min_w_h;
}
/* Decode and print a captcha prediction: for each of the n character slots,
 * print the alphanumeric symbol with the highest score, then a newline. */
void print_letters(float *pred, int n)
{
    int slot;
    for (slot = 0; slot < n; ++slot) {
        int best = max_index(pred + slot*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(best));
    }
    printf("\n");
}
/* One-hot encode the captcha text embedded in the file name: characters of
 * the basename (up to the first '.') each set one of NUMCHARS slots;
 * remaining slots are marked with the final "blank" class (NUMCHARS-1). */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;    // skip past the '/' to the basename
    int i;
    for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        // NOTE(review): index > 35 is only warned about, then still written;
        // slot 36 (NUMCHARS-1) is valid, but anything larger would write out
        // of bounds — confirm alphanum_to_int's range.
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        // pad unused positions with the blank class
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}
/* Load a captcha batch: n images (random from paths[0..m-1] when m != 0)
 * plus one-hot truth of k characters per image. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int row;
    for (row = 0; row < n; ++row) {
        fill_truth_captcha(paths[row], k, d.y.vals[row]);
    }
    if (m) free(paths);     // only the sampled array; path strings are borrowed
    return d;
}
/* Load a captcha autoencoder batch: the target y is the input X itself. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    // NOTE(review): hard-coded column count presumably matches the encoder's
    // expected input size — confirm against the network config.
    d.X.cols = 17100;
    // y aliases X (same vals pointers) while shallow == 0; freeing this with
    // free_data() would free the rows twice — caller must handle it specially.
    d.y = d.X;
    if(m) free(paths);
    return d;
}
/* One-hot encode: set truth[i] = 1 for every labels[i] that occurs as a
 * substring of path (all other slots zeroed).  Exactly one match is
 * expected; otherwise the matches are printed for diagnosis. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    memset(truth, 0, k*sizeof(float));
    int i, count = 0;
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i]) != NULL) {
            truth[i] = 1;
            ++count;
        }
    }
    if (count == 1) return;
    printf("Too many or too few labels: %d, %s\n", count, path);
    count = 0;
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i])) {
            printf("\t label %d: %s \n", count, labels[i]);
            count++;
        }
    }
}
/* Label-smoothed one-hot encode: a matching label gets 1 - eps, every other
 * slot gets eps/(k-1).  Exactly one match is expected; otherwise the
 * matches are printed for diagnosis. */
void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps)
{
    memset(truth, 0, k * sizeof(float));
    int i, count = 0;
    for (i = 0; i < k; ++i) {
        truth[i] = strstr(path, labels[i])
                 ? (1 - label_smooth_eps)
                 : label_smooth_eps / (k - 1);
        if (truth[i] == (1 - label_smooth_eps)) ++count;
    }
    if (count == 1) return;
    printf("Too many or too few labels: %d, %s\n", count, path);
    count = 0;
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i])) {
            printf("\t label %d: %s \n", count, labels[i]);
            count++;
        }
    }
}
/* Propagate truth labels up a class hierarchy: every ancestor of a set
 * label is also set to 1.  Then, for every sibling group with no positive
 * member, mark all its members with SECRET_NUM (a "don't care" sentinel for
 * the loss). */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    // pass 1: turn on every ancestor of each positive label
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;   // running offset of the current group within truth
    // pass 2: groups with no positive member become "don't care"
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;     // group has a positive label; leave it as is
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}
/* Build the (n x k) truth matrix for a batch of classification paths, with
 * label smoothing and optional hierarchy propagation.  Does nothing when
 * labels is NULL. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps)
{
    matrix y = make_matrix(n, k);
    int row;
    for (row = 0; row < n && labels; ++row) {
        fill_truth_smooth(paths[row], labels, k, y.vals[row], label_smooth_eps);
        if (hierarchy) fill_hierarchy(y.vals[row], k, hierarchy);
    }
    return y;
}
/* Build a multi-label (n x k) truth matrix for tagging: for each image path
 * under "imgs", read integer tag ids from the matching "labels" (or
 * fallback "labels2") .txt file and set those columns to 1.  Images with no
 * label file are silently left all-zero; prints found/total at the end. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    int count = 0;   // number of images for which a label file was found
    for(i = 0; i < n; ++i){
        char label[4096];
        // imgs/xxx_iconl.jpeg -> labels/xxx.txt
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file){
            // second chance: alternative labels2 directory
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if(!file) continue;
        }
        ++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    printf("%d/%d\n", count, n);
    return y;
}
/* Read label names (one per line) from filename into a heap array; if size
 * is non-NULL it receives the number of labels. */
char **get_labels_custom(char *filename, int *size)
{
    list *lines = get_paths(filename);
    if (size) *size = lines->size;
    char **labels = (char **)list_to_array(lines);
    free_list(lines);   // the strings now belong to the returned array
    return labels;
}
/* Convenience wrapper around get_labels_custom() when the label count is
 * not needed. */
char **get_labels(char *filename)
{
    return get_labels_custom(filename, 0);
}
/* Release a data batch.  Shallow batches own only the row-pointer arrays;
 * deep batches own the row buffers too. */
void free_data(data d)
{
    if (d.shallow) {
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/* Load n randomly chosen images with random crop/flip/HSV augmentation and
 * fill region-format (grid) truth for each.  Returns a deep batch
 * (d.shallow == 0) that the caller frees with free_data(). */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    // one grid cell = objectness + classes + box
    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        int oh = orig.h;
        int ow = orig.w;
        // random crop amounts on each side, up to +/- jitter fraction
        int dw = (ow*jitter);
        int dh = (oh*jitter);
        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);
        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;
        // crop size and offset relative to the original image
        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;
        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;
        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;   // batch takes ownership of the pixels
        // truth boxes get the same geometric transform as the image
        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}
/* Load n image *pairs* for comparison training: each row of X holds two
 * RGB images side by side (h*w*6 floats); each row of y holds, per class,
 * the best IoU seen for each of the two images, discretized to {0,1} when
 * one side clearly wins and SECRET_NUM ("don't care") otherwise.
 *
 * Fix: both label-file fopen() results were used unchecked — a missing
 * label file crashed in fscanf(NULL).  Now reported via file_error(), as
 * get_paths() does. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;    // two RGB images per row
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2], w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);
        d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
        int id;
        float iou;
        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if (!fp1) file_error(imlabel1);   // was unchecked: fscanf(NULL) crashed
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            // keep the best IoU per class for image 1
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if (!fp2) file_error(imlabel2);
        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            // keep the best IoU per class for image 2
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;       // image 1 clearly wins
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;       // image 2 clearly wins
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j] = SECRET_NUM;      // ambiguous: don't care
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/* Load ONE randomly chosen image with random crop/flip augmentation and
 * SWAG-format truth (up to 30 boxes of (x,y,w,h) + one-hot class).
 * The batch keeps the original image resolution (d.w, d.h). */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = random_gen()%n;
    char *random_path = paths[index];
    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    d.X.rows = 1;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    int k = (4+classes)*30;   // 30 boxes max, matching fill_truth_swag()
    d.y = make_matrix(1, k);
    // random crop amounts on each side, up to +/- jitter fraction
    int dw = w*jitter;
    int dh = h*jitter;
    int pleft  = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop   = rand_uniform(-dh, dh);
    int pbot   = rand_uniform(-dh, dh);
    int swidth =  w - pleft - pright;
    int sheight = h - ptop - pbot;
    float sx = (float)swidth  / w;
    float sy = (float)sheight / h;
    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;
    // resize back to the original dimensions
    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;   // batch takes ownership of the pixels
    // truth boxes get the same geometric transform as the image
    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
    free_image(orig);
    free_image(cropped);
    return d;
}
/* Append the boxes from old_truth after the boxes already present in
 * new_truth (each box is 5 floats: x,y,w,h,id; a zero x terminates a list),
 * stopping when new_truth is full or old_truth runs out. */
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
    const int t_size = 4 + 1;
    // count the boxes already stored in new_truth
    int count_new_truth = 0;
    while (count_new_truth < boxes && new_truth[count_new_truth*t_size]) {
        ++count_new_truth;
    }
    int t;
    for (t = count_new_truth; t < boxes; ++t) {
        const float *src = old_truth + (t - count_new_truth)*t_size;
        if (!src[0]) break;                 // end of the old list
        float *dst = new_truth + t*t_size;
        int f;
        for (f = 0; f < t_size; ++f) dst[f] = src[f];
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}
/* Merge old_truth boxes into new_truth for one quadrant (i_mixup = 0..3) of
 * a 4-image mosaic: shift each box by the quadrant's placement offsets,
 * clip it to the image, and keep it only if it still lies fully inside
 * [0,1] with a non-degenerate size.  Box layout is 5 floats (x,y,w,h,id);
 * a zero x terminates a list. */
void blend_truth_mosaic(float *new_truth, int boxes, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup,
    int left_shift, int right_shift, int top_shift, int bot_shift)
{
    const int t_size = 4 + 1;
    // count the boxes already stored in new_truth
    int count_new_truth = 0;
    int t;
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*(4 + 1)];
        if (!x) break;
        count_new_truth++;
    }
    int new_t = count_new_truth;   // write cursor (only accepted boxes advance it)
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + new_t*t_size;
        new_truth_ptr[0] = 0;      // pre-terminate in case this slot stays empty
        float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
        float x = old_truth_ptr[0];
        if (!x) break;             // end of the old list
        float xb = old_truth_ptr[0];
        float yb = old_truth_ptr[1];
        float wb = old_truth_ptr[2];
        float hb = old_truth_ptr[3];
        // shift 4 images: translate the box into mosaic coordinates
        // depending on which quadrant this source image occupies
        if (i_mixup == 0) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 1) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 2) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }
        if (i_mixup == 3) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }
        // box edges in pixels
        int left = (xb - wb / 2)*w;
        int right = (xb + wb / 2)*w;
        int top = (yb - hb / 2)*h;
        int bot = (yb + hb / 2)*h;
        // fix out of bound: shrink the box back inside the image,
        // moving the center by half the clipped amount
        if (left < 0) {
            float diff = (float)left / w;
            xb = xb - diff / 2;
            wb = wb + diff;
        }
        if (right > w) {
            float diff = (float)(right - w) / w;
            xb = xb - diff / 2;
            wb = wb - diff;
        }
        if (top < 0) {
            float diff = (float)top / h;
            yb = yb - diff / 2;
            hb = hb + diff;
        }
        if (bot > h) {
            float diff = (float)(bot - h) / h;
            yb = yb - diff / 2;
            hb = hb - diff;
        }
        // recompute edges after clipping
        left = (xb - wb / 2)*w;
        right = (xb + wb / 2)*w;
        top = (yb - hb / 2)*h;
        bot = (yb + hb / 2)*h;
        // leave only within the image
        if(left >= 0 && right <= w && top >= 0 && bot <= h &&
            wb > 0 && wb < 1 && hb > 0 && hb < 1 &&
            xb > 0 && xb < 1 && yb > 0 && yb < 1)
        {
            new_truth_ptr[0] = xb;
            new_truth_ptr[1] = yb;
            new_truth_ptr[2] = wb;
            new_truth_ptr[3] = hb;
            new_truth_ptr[4] = old_truth_ptr[4];   // class id is preserved
            new_t++;
        }
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}
#ifdef OPENCV
#include "http_stream.h"
/* Load a detection mini-batch of n images plus YOLO truth (OpenCV build):
 * random crop/jitter, optional flip, HSV distortion, optional blur, and
 * optional blending of multiple source images — use_mixup==1 blends two
 * images 50/50, use_mixup==3 builds a 4-image mosaic around a random cut
 * point.  When track != 0, paths follow video time lines and one
 * augmentation is reused across the sequence.  Returns a deep batch
 * (d.shallow == 0); caller frees with free_data(). */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup,
    float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();   // only used to name debug images
    c = c ? c : 3;                            // default channel count
    assert(use_mixup != 2);                   // cutmix is not supported here
    if (use_mixup == 3 && letter_box) {
        printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n");
        exit(0);
    }
    if (random_gen() % 2 == 0) use_mixup = 0; // apply mixup/mosaic half the time
    int i;
    int *cut_x = NULL, *cut_y = NULL;
    if (use_mixup == 3) {
        // per-sample mosaic cut point, kept away from the borders
        cut_x = (int*)calloc(n, sizeof(int));
        cut_y = (int*)calloc(n, sizeof(int));
        const float min_offset = 0.2; // 20%
        for (i = 0; i < n; ++i) {
            cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset));
            cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset));
        }
    }
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;
    // augmentation parameters, reusable across a tracked sequence
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
    int augmentation_calculated = 0;
    d.y = make_matrix(n, 5*boxes);
    int i_mixup = 0;
    // one pass per source image being blended (1, 2, or 4 passes)
    for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1)
        char **random_paths;
        if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else random_paths = get_random_paths(paths, n, m);
        for (i = 0; i < n; ++i) {
            float *truth = (float*)xcalloc(5 * boxes, sizeof(float));
            const char *filename = random_paths[i];
            int flag = (c >= 3);
            mat_cv *src;
            src = load_image_mat_cv(filename, flag);
            if (src == NULL) {
                // unreadable image: leave this row empty and move on
                if (check_mistakes) {
                    printf("\n Error in load_data_detection() - OpenCV \n");
                    getchar();
                }
                continue;
            }
            int oh = get_height_mat(src);
            int ow = get_width_mat(src);
            int dw = (ow*jitter);
            int dh = (oh*jitter);
            // draw fresh augmentation unless we are reusing it along a track
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();
                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);
                flip = use_flip ? random_gen() % 2 : 0;
                if (use_blur) {
                    int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image
                    if (tmp_blur == 0) blur = 0;
                    else if (tmp_blur == 1) blur = 1;
                    else blur = use_blur;
                }
            }
            // crop offsets derived from the precomputed random draws
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);
            //float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
            if (letter_box)
            {
                // widen the crop so the image keeps its aspect ratio inside
                // the network's w x h (letterbox padding)
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1) // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh)/2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow)/2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }
            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;
            // crop transform passed to fill_truth_detection()
            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;
            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;
            int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
            if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8; // disable blur if one of the objects is too small
            image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp,
                blur, boxes, truth);
            if (use_mixup == 0) {
                // plain augmentation: the image and truth go straight in
                d.X.vals[i] = ai.data;
                memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
            }
            else if (use_mixup == 1) {
                if (i_mixup == 0) {
                    // first pass: store; second pass blends on top
                    d.X.vals[i] = ai.data;
                    memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
                }
                else if (i_mixup == 1) {
                    // 50/50 pixel blend with the first image; truths concatenated
                    image old_img = make_empty_image(w, h, c);
                    old_img.data = d.X.vals[i];
                    //show_image(ai, "new");
                    //show_image(old_img, "old");
                    //wait_until_press_key_cv();
                    blend_images_cv(ai, 0.5, old_img, 0.5);
                    blend_truth(d.y.vals[i], boxes, truth);
                    free_image(old_img);
                    d.X.vals[i] = ai.data;
                }
            }
            else if (use_mixup == 3) {
                if (i_mixup == 0) {
                    // allocate the mosaic canvas on the first pass
                    image tmp_img = make_image(w, h, c);
                    d.X.vals[i] = tmp_img.data;
                }
                if (flip) {
                    // mirror the crop offsets to match the flipped image
                    int tmp = pleft;
                    pleft = pright;
                    pright = tmp;
                }
                // how far each quadrant's content is shifted into the canvas
                const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow)));
                const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh)));
                const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow)));
                const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh)));
                int k, x, y;
                // copy this pass's image into its quadrant, row by row
                for (k = 0; k < c; ++k) {
                    for (y = 0; y < h; ++y) {
                        int j = y*w + k*w*h;
                        if (i_mixup == 0 && y < cut_y[i]) {          // top-left
                            int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
                            memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float));
                        }
                        if (i_mixup == 1 && y < cut_y[i]) {          // top-right
                            int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
                            memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float));
                        }
                        if (i_mixup == 2 && y >= cut_y[i]) {         // bottom-left
                            int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h;
                            memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float));
                        }
                        if (i_mixup == 3 && y >= cut_y[i]) {         // bottom-right
                            int j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h;
                            memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float));
                        }
                    }
                }
                // merge this quadrant's boxes into the canvas truth
                blend_truth_mosaic(d.y.vals[i], boxes, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift);
                free_image(ai);
                ai.data = d.X.vals[i];
            }
            // debug visualization: save (and optionally show) the final
            // augmented image with its boxes drawn
            if (show_imgs && i_mixup == use_mixup) // delete i_mixup
            {
                image tmp_ai = copy_image(ai);
                char buff[1000];
                //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*ai.w;
                    int right = (b.x + b.w / 2.)*ai.w;
                    int top = (b.y - b.h / 2.)*ai.h;
                    int bot = (b.y + b.h / 2.)*ai.h;
                    draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(tmp_ai, buff);
                if (show_imgs == 1) {
                    //char buff_src[1000];
                    //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                    //show_image_mat(src, buff_src);
                    show_image(tmp_ai, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
                free_image(tmp_ai);
            }
            release_mat(&src);
            free(truth);
        }
        if (random_paths) free(random_paths);
    }
    return d;
}
#else // OPENCV
/* In-place weighted blend of two equally-sized images:
 * new_img = alpha * new_img + beta * old_img, element by element. */
void blend_images(image new_img, float alpha, image old_img, float beta)
{
    const int total = new_img.w * new_img.h * new_img.c;
    int idx;
    #pragma omp parallel for
    for (idx = 0; idx < total; ++idx) {
        new_img.data[idx] = alpha * new_img.data[idx] + beta * old_img.data[idx];
    }
}
/* Detection-data loader for builds WITHOUT OpenCV.
 * Loads n images and their box labels, applies random crop / flip / HSV
 * distortion and optional two-image MixUp, and returns them as one batch.
 * NOTE: this path only supports use_mixup 0/1 (see assert below); the
 * OpenCV build additionally implements CutMix and Mosaic. */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter,
    float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3; // default to 3-channel (RGB) images
    char **random_paths;
    char **mixup_random_paths = NULL;
    // `track` keeps frames of a video sequence together in the mini-batch
    if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);
    assert(use_mixup < 2); // CutMix/Mosaic need the OpenCV build
    int mixup = use_mixup ? random_gen() % 2 : 0; // MixUp applied to ~half the batches
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        // second set of images to blend with the first pass
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }
    int i;
    data d = { 0 };
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;
    // augmentation randoms; reused across a tracked sequence
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0;
    int augmentation_calculated = 0;
    d.y = make_matrix(n, 5 * boxes); // 5 floats per box: x, y, w, h, class
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0; // fresh randoms for the 2nd pass
        for (i = 0; i < n; ++i) {
            float *truth = (float*)xcalloc(5 * boxes, sizeof(float));
            char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];
            image orig = load_image(filename, 0, 0, c);
            int oh = orig.h;
            int ow = orig.w;
            int dw = (ow*jitter);
            int dh = (oh*jitter);
            // when tracking, draw the augmentation randoms once and reuse them
            // so every frame of the sequence is transformed identically
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();
                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);
                flip = use_flip ? random_gen() % 2 : 0;
            }
            // random crop borders (can be negative = padding)
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
            if (letter_box)
            {
                // widen the crop so the image keeps its aspect ratio when
                // stretched to the network's w x h
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1) // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh) / 2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow) / 2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }
            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;
            // sx/sy: crop size relative to the original; dx/dy: crop offset
            // in crop-relative coordinates — both feed the label transform
            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;
            image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;
            image sized = resize_image(cropped, w, h);
            if (flip) flip_image(sized);
            distort_image(sized, dhue, dsat, dexp);
            //random_distort_image(sized, hue, saturation, exposure);
            fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
            if (i_mixup) {
                // second pass: average this image into the one loaded first
                image old_img = sized;
                old_img.data = d.X.vals[i];
                //show_image(sized, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images(sized, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img); // frees the first pass's pixel buffer
            }
            d.X.vals[i] = sized.data; // batch takes ownership of the pixels
            memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
            if (show_imgs)// && i_mixup)
            {
                // debug visualisation: draw the label boxes and save/show
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*sized.w;
                    int right = (b.x + b.w / 2.)*sized.w;
                    int top = (b.y - b.h / 2.)*sized.h;
                    int bot = (b.y + b.h / 2.)*sized.h;
                    draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(sized, buff);
                if (show_imgs == 1) {
                    show_image(sized, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
                //getchar();
            }
            free_image(orig);
            free_image(cropped);
            free(truth);
        }
    }
    free(random_paths);
    if (mixup_random_paths) free(mixup_random_paths);
    return d;
}
#endif // OPENCV
/* pthread entry point: dispatches on args->type to the matching loader and
 * writes the result through the pointers carried inside load_args.
 * Takes ownership of ptr (a heap-allocated load_args) and frees it. */
void *load_thread(void *ptr)
{
    //srand(time(0));
    //printf("Loading data: %d\n", random_gen());
    load_args a = *(struct load_args*)ptr;
    // 0 means "unset" for these augmentations; normalize to identity
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;
    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter,
            a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        // single image + plain resize (for detection demo / prediction)
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    }else if (a.type == LETTERBOX_DATA) {
        // single image + aspect-preserving letterbox resize
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);
    return 0;
}
/* Spawn one worker running load_thread() on a heap copy of args.
 * Returns the thread handle; caller must pthread_join() it. */
pthread_t load_data_in_thread(load_args args)
{
    struct load_args *copy = (load_args*)xcalloc(1, sizeof(struct load_args));
    pthread_t thread;
    *copy = args;
    if (pthread_create(&thread, 0, load_thread, copy)) error("Thread creation failed");
    return thread;
}
/* Fan-out loader: splits args.n rows across args.threads worker threads,
 * joins them, and concatenates the per-thread results into *args.d.
 * Takes ownership of ptr and frees it. */
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data* buffers = (data*)xcalloc(args.threads, sizeof(data));
    pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        // even split: thread i loads the i-th slice of `total` rows
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    // concat reused the row pointers, so free only the per-thread shells
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}
/* Kick off an asynchronous multi-threaded load (see load_threads()).
 * Returns the coordinating thread; caller joins it when the data is needed. */
pthread_t load_data(load_args args)
{
    struct load_args *copy = (load_args*)xcalloc(1, sizeof(struct load_args));
    pthread_t thread;
    *copy = args;
    if (pthread_create(&thread, 0, load_threads, copy)) error("Thread creation failed");
    return thread;
}
/* Load paired input images (paths) and gray-scale label images obtained by
 * replacing ".png" with "-label.png" in each path. If m > 0, first samples
 * n random paths out of the m available. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths); // frees only the sampled pointer array, not the strings
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}
/* Simple classification loader: images resized to w x h with one-hot labels
 * matched from the path names. If m > 0, samples n random paths from m. */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    data d = {0};
    if (m) paths = get_random_paths(paths, n, m);
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0, 0);
    if (m) free(paths);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/* Super-resolution pairs: y is a random (w*scale x h*scale) crop of each
 * image, X is that same crop downscaled to w x h. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    int i;
    d.X.rows = n;
    d.X.vals = (float**)xcalloc(n, sizeof(float*));
    d.X.cols = w*h*3;
    d.y.rows = n;
    d.y.vals = (float**)xcalloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = random_gen()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        // d takes ownership of both pixel buffers; only `im` is freed here
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }
    if(m) free(paths);
    return d;
}
/* Classification loader with heavy augmentation. Loads n images with labels,
 * then (depending on use_mixup) combines pairs/quads of batches:
 *   1 = MixUp (average two images and labels)
 *   2 = CutMix (paste a rectangle of image 2 over image 1)
 *   3 = Mosaic (stitch four images into quadrants)
 *   4 = alternate CutMix and Mosaic per row
 * Optionally blurs images (OpenCV builds) and dumps debug images. */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv)
{
    char **paths_stored = paths;
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy, label_smooth_eps);
    // mixing is applied to roughly half the batches
    if (use_mixup && rand_int(0, 1)) {
        char **paths_mix = get_random_paths(paths_stored, n, m);
        data d2 = { 0 };
        d2.shallow = 0;
        d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
        d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps);
        free(paths_mix);
        data d3 = { 0 };
        d3.shallow = 0;
        data d4 = { 0 };
        d4.shallow = 0;
        if (use_mixup >= 3) {
            // Mosaic needs two extra batches (four images per output row)
            char **paths_mix3 = get_random_paths(paths_stored, n, m);
            d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
            d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps);
            free(paths_mix3);
            char **paths_mix4 = get_random_paths(paths_stored, n, m);
            d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv);
            d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps);
            free(paths_mix4);
        }
        // mix
        int i, j;
        for (i = 0; i < d2.X.rows; ++i) {
            int mixup = use_mixup;
            if (use_mixup == 4) mixup = rand_int(2, 3); // alternate CutMix and Mosaic
            // MixUp -----------------------------------
            if (mixup == 1) {
                // mix images
                for (j = 0; j < d2.X.cols; ++j) {
                    d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f;
                }
                // mix labels
                for (j = 0; j < d2.y.cols; ++j) {
                    d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f;
                }
            }
            // CutMix -----------------------------------
            else if (mixup == 2) {
                const float min = 0.3;    // 0.3*0.3 = 9%
                const float max = 0.8;    // 0.8*0.8 = 64%
                // random rectangle wholly inside the image
                const int cut_w = rand_int(w*min, w*max);
                const int cut_h = rand_int(h*min, h*max);
                const int cut_x = rand_int(0, w - cut_w - 1);
                const int cut_y = rand_int(0, h - cut_h - 1);
                const int left = cut_x;
                const int right = cut_x + cut_w;
                const int top = cut_y;
                const int bot = cut_y + cut_h;
                assert(cut_x >= 0 && cut_x <= w);
                assert(cut_y >= 0 && cut_y <= h);
                assert(cut_w >= 0 && cut_w <= w);
                assert(cut_h >= 0 && cut_h <= h);
                assert(right >= 0 && right <= w);
                assert(bot >= 0 && bot <= h);
                assert(top <= bot);
                assert(left <= right);
                // labels are mixed by the area fraction of the pasted patch
                const float alpha = (float)(cut_w*cut_h) / (float)(w*h);
                const float beta = 1 - alpha;
                int c, x, y;
                for (c = 0; c < 3; ++c) {
                    for (y = top; y < bot; ++y) {
                        for (x = left; x < right; ++x) {
                            int j = x + y*w + c*w*h;
                            d.X.vals[i][j] = d2.X.vals[i][j];
                        }
                    }
                }
                //printf("\n alpha = %f, beta = %f \n", alpha, beta);
                // mix labels
                for (j = 0; j < d.y.cols; ++j) {
                    d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha;
                }
            }
            // Mosaic -----------------------------------
            else if (mixup == 3)
            {
                // split point kept at least 20% away from every border
                const float min_offset = 0.2; // 20%
                const int cut_x = rand_int(w*min_offset, w*(1 - min_offset));
                const int cut_y = rand_int(h*min_offset, h*(1 - min_offset));
                // quadrant area fractions, used as label weights below
                float s1 = (float)(cut_x * cut_y) / (w*h);
                float s2 = (float)((w - cut_x) * cut_y) / (w*h);
                float s3 = (float)(cut_x * (h - cut_y)) / (w*h);
                float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h);
                int c, x, y;
                for (c = 0; c < 3; ++c) {
                    for (y = 0; y < h; ++y) {
                        for (x = 0; x < w; ++x) {
                            int j = x + y*w + c*w*h;
                            if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j];
                            if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j];
                            if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j];
                            if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j];
                        }
                    }
                }
                for (j = 0; j < d.y.cols; ++j) {
                    const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4)));
                    d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s;
                }
            }
        }
        free_data(d2);
        if (use_mixup >= 3) {
            free_data(d3);
            free_data(d4);
        }
    }
#ifdef OPENCV
    if (use_blur) {
        int i;
        for (i = 0; i < d.X.rows; ++i) {
            if (random_gen() % 2) { // blur ~half of the images
                image im = make_empty_image(w, h, 3);
                im.data = d.X.vals[i];
                int ksize = use_blur;
                if (use_blur == 1) ksize = 17; // 1 means "default kernel"
                image blurred = blur_image(im, ksize);
                free_image(im); // frees the un-blurred pixel buffer
                d.X.vals[i] = blurred.data;
                //if (i == 0) {
                //    show_image(im, "Not blurred");
                //    show_image(blurred, "blurred");
                //    wait_until_press_key_cv();
                //}
            }
        }
    }
#endif // OPENCV
    if (show_imgs) {
        // debug: save each augmented image and print its active classes
        int i, j;
        for (i = 0; i < d.X.rows; ++i) {
            image im = make_empty_image(w, h, 3);
            im.data = d.X.vals[i];
            char buff[1000];
            sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen());
            save_image(im, buff);
            char buff_string[1000];
            sprintf(buff_string, "\n Classes: ");
            for (j = 0; j < d.y.cols; ++j) {
                if (d.y.vals[i][j] > 0) {
                    char buff_tmp[100];
                    sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]);
                    strcat(buff_string, buff_tmp);
                }
            }
            printf("%s \n", buff_string);
            if (show_imgs == 1) {
                show_image(im, buff);
                wait_until_press_key_cv();
            }
        }
        printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
    }
    if (m) free(paths);
    return d;
}
/* Multi-label ("tag") loader: augmented images plus k-way tag vectors
 * parsed from per-image tag files. */
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
    data d = {0};
    if (m) paths = get_random_paths(paths, n, m);
    d.w = w;
    d.h = h;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(paths, n, k);
    if (m) free(paths);
    return d;
}
/* Stack m1 on top of m2 into a new matrix. Row POINTERS are copied, not the
 * row data, so the result aliases both inputs. */
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix joined;
    int i;
    joined.cols = m1.cols;
    joined.rows = m1.rows + m2.rows;
    joined.vals = (float**)xcalloc(m1.rows + m2.rows, sizeof(float*));
    for (i = 0; i < m1.rows; ++i) {
        joined.vals[i] = m1.vals[i];
    }
    for (i = 0; i < m2.rows; ++i) {
        joined.vals[m1.rows + i] = m2.vals[i];
    }
    return joined;
}
/* Concatenate two datasets into a shallow view (row pointers shared with
 * both inputs; shallow = 1 so free_data() won't free the rows). */
data concat_data(data d1, data d2)
{
    data joined = {0};
    joined.shallow = 1;
    joined.X = concat_matrix(d1.X, d2.X);
    joined.y = concat_matrix(d1.y, d2.y);
    return joined;
}
/* Concatenate n datasets into one. Each step builds a new shallow view and
 * frees the previous accumulator (which, being shallow, releases only its
 * pointer arrays — the row data itself is reused by the result). */
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data newdata = concat_data(d[i], out);
        free_data(out);
        out = newdata;
    }
    return out;
}
/* Load a CSV file as features, popping column `target` out and one-hot
 * encoding it into k classes as the labels. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    matrix X = csv_to_matrix(filename);
    float *labels_1d = pop_column(&X, target);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = one_hot_encode(labels_1d, X.rows, k);
    free(labels_1d);
    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    return d;
}
/* Load one CIFAR-10 batch file (10000 records of 1 label byte + 3072 pixel
 * bytes), scaling pixels to [0, 1]. Aborts via file_error() if the file is
 * missing or truncated. */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        // previously the fread result was ignored, so a short/corrupt file
        // silently filled rows with uninitialized bytes
        if(fread(bytes, 1, 3073, fp) != 3073) file_error(filename);
        int class_id = bytes[0];
        y.vals[i][class_id] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    //translate_data_rows(d, -128);
    scale_data_rows(d, 1./255);
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}
/* Copy n uniformly random rows of d into the flat output buffers X and y. */
void get_random_batch(data d, int n, float *X, float *y)
{
    int row;
    for (row = 0; row < n; ++row) {
        const int pick = random_gen() % d.X.rows;
        memcpy(X + row*d.X.cols, d.X.vals[pick], d.X.cols*sizeof(float));
        memcpy(y + row*d.y.cols, d.y.vals[pick], d.y.cols*sizeof(float));
    }
}
/* Copy n consecutive rows of d, starting at `offset`, into the flat output
 * buffers X and y. Caller guarantees offset + n <= d.X.rows. */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int row;
    for (row = 0; row < n; ++row) {
        const int src = offset + row;
        memcpy(X + row*d.X.cols, d.X.vals[src], d.X.cols*sizeof(float));
        memcpy(y + row*d.y.cols, d.y.vals[src], d.y.cols*sizeof(float));
    }
}
/* Label smoothing: pull every label toward the uniform distribution,
 * y = eps/k + (1 - eps) * y with eps fixed at 0.1. */
void smooth_data(data d)
{
    const float eps = .1;
    const float scale = 1. / d.y.cols;
    int r, c;
    for (r = 0; r < d.y.rows; ++r) {
        for (c = 0; c < d.y.cols; ++c) {
            d.y.vals[r][c] = eps * scale + (1 - eps) * d.y.vals[r][c];
        }
    }
}
/* Load all five CIFAR-10 training batches (50000 records) from the standard
 * data/cifar path, scale pixels to [0, 1] and apply label smoothing.
 * Aborts via file_error() on a missing or truncated batch file. */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;
    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            // previously the fread result was ignored, so a short/corrupt
            // file silently filled rows with uninitialized bytes
            if(fread(bytes, 1, 3073, fp) != 3073) file_error(buff);
            int class_id = bytes[0];
            y.vals[i+b*10000][class_id] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    //translate_data_rows(d, -128);
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}
/* Load a Go training set: each record is a "row col" move line followed by
 * a 19x19 board string ('1' = own stone, '2' = opponent, other = empty).
 * X encodes the board as +1/0/-1; y one-hot encodes the move index. */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    // check the file before committing to the very large matrix allocations
    if(!fp) file_error(filename);
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            // grow geometrically if the file is larger than the initial guess
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        sscanf(label, "%d %d", &row, &col);
        char *board = fgetl(fp);
        // a move line without its board line means a truncated file; stop
        // cleanly instead of dereferencing NULL
        if(!board){
            free(label);
            break;
        }
        int index = row*19 + col;
        y.vals[count][index] = 1;
        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    // shrink to the number of records actually read
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);
    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}
/* Shuffle rows of d in place (X and y swapped together so pairs stay
 * aligned) using the Fisher-Yates algorithm. */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        // pick from [0, i] INCLUSIVE: the previous random_gen()%i excluded
        // i itself, which biases the shuffle (no element could stay put)
        int index = random_gen()%(i+1);
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;
        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every feature row of d by s (in place). */
void scale_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        scale_array(d.X.vals[i], d.X.cols, s);
    }
}
/* Add s to every element of every feature row of d (in place). */
void translate_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        translate_array(d.X.vals[i], d.X.cols, s);
    }
}
/* Normalize each feature row of d independently (in place). */
void normalize_data_rows(data d)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        normalize_array(d.X.vals[i], d.X.cols);
    }
}
/* Shallow view of the `part`-th of `total` contiguous slices of d; slice
 * boundaries are computed so all parts cover d exactly. */
data get_data_part(data d, int part, int total)
{
    data p = {0};
    const int x_start = d.X.rows * part / total;
    const int x_end = d.X.rows * (part + 1) / total;
    const int y_start = d.y.rows * part / total;
    const int y_end = d.y.rows * (part + 1) / total;
    p.shallow = 1;
    p.X.rows = x_end - x_start;
    p.y.rows = y_end - y_start;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + x_start;
    p.y.vals = d.y.vals + y_start;
    return p;
}
/* Shallow sample of `num` rows drawn (with replacement) from d. Only the
 * pointer arrays are owned by the result (shallow = 1). */
data get_random_data(data d, int num)
{
    data sample = {0};
    int row;
    sample.shallow = 1;
    sample.X.rows = num;
    sample.y.rows = num;
    sample.X.cols = d.X.cols;
    sample.y.cols = d.y.cols;
    sample.X.vals = (float**)xcalloc(num, sizeof(float*));
    sample.y.vals = (float**)xcalloc(num, sizeof(float*));
    for (row = 0; row < num; ++row) {
        const int pick = random_gen() % d.X.rows;
        sample.X.vals[row] = d.X.vals[pick];
        sample.y.vals[row] = d.y.vals[pick];
    }
    return sample;
}
/* Split d into {train, test}: rows [start, end) — the `part`-th of `total`
 * slices — become the test set, everything else the train set. Both are
 * shallow views sharing row pointers with d; caller frees the returned
 * 2-element array. */
data *split_data(data d, int part, int total)
{
    data* split = (data*)xcalloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train ={0};
    data test ={0};
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = (float**)xcalloc(train.X.rows, sizeof(float*));
    test.X.vals = (float**)xcalloc(test.X.rows, sizeof(float*));
    train.y.vals = (float**)xcalloc(train.y.rows, sizeof(float*));
    test.y.vals = (float**)xcalloc(test.y.rows, sizeof(float*));
    // rows before the test window
    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    // the test window itself
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    // rows after the test window, shifted down to fill the gap
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
|
strgrp.c | /*
Group similar strings
Copyright (C) 2014 Andrew Jeffery <andrew@aj.id.au>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <limits.h>
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ccan/darray/darray.h"
#include "ccan/stringmap/stringmap.h"
#include "ccan/tal/tal.h"
#include "ccan/tal/str/str.h"
#include "strgrp.h"
#include "config.h"
#define CHAR_N_VALUES (1 << CHAR_BIT) /* number of distinct byte values */
typedef darray(struct strgrp_grp *) darray_grp;       /* dynamic array of groups */
typedef darray(struct strgrp_item *) darray_item;     /* dynamic array of items */
typedef stringmap(struct strgrp_grp *) stringmap_grp; /* exact string -> group cache */
/* Scratch pairing of a candidate group with its similarity score. */
struct grp_score {
    struct strgrp_grp *grp;
    double score;
};
typedef darray(struct grp_score *) darray_score;
/* Top-level grouping context. */
struct strgrp {
    double threshold;           /* minimum similarity to join an existing group */
    stringmap_grp known;        /* cache of exact strings already classified */
    unsigned int n_grps;        /* number of groups in grps */
    darray_grp grps;
    struct grp_score *scores;   /* scratch score array, one slot per group */
    int16_t pop[CHAR_N_VALUES]; /* character popcount of the string being scored */
};
/* Iterator over all groups in a strgrp context. */
struct strgrp_iter {
    const struct strgrp *ctx;
    int i;
};
/* One group: a representative key plus every string assigned to it. */
struct strgrp_grp {
    const char *key;            /* representative string */
    size_t key_len;
    darray_item items;
    int32_t n_items;
    int16_t pop[CHAR_N_VALUES]; /* cached character popcount of key */
};
/* Iterator over the items within one group. */
struct strgrp_grp_iter {
    const struct strgrp_grp *grp;
    int i;
};
/* A single stored string together with its user payload. */
struct strgrp_item {
    const char *key;
    void *value;
};
/* String vector cosine similarity[1]
*
* [1] http://blog.nishtahir.com/2015/09/19/fuzzy-string-matching-using-cosine-similarity/
*/
#ifndef CHAR_N_VALUES
#define CHAR_N_VALUES (1 << CHAR_BIT)
#endif
/* Fill pop[] with the per-byte population count (histogram) of str. */
static inline void
strpopcnt(const char *const str, int16_t pop[CHAR_N_VALUES]) {
    const char *c;
    memset(pop, 0, CHAR_N_VALUES * sizeof(*pop));
    for(c = str; *c; c++) {
        /* The previous assert(*c >= 0) aborted debug builds on high-bit
         * (non-ASCII) bytes, even though the unsigned-char cast below
         * indexes them correctly. */
        pop[(unsigned char)*c]++;
    }
}
#ifndef CHAR_N_VALUES
#define CHAR_N_VALUES (1 << CHAR_BIT)
#endif
/* Angular cosine similarity of two character-population vectors, mapped to
 * [0, 1]: 1 means identical direction, 0 means orthogonal. */
static inline double
strcossim(const int16_t ref[CHAR_N_VALUES], const int16_t key[CHAR_N_VALUES]) {
    /* 64-bit accumulators: the original int32_t product sai2 * sbi2
     * overflowed for long strings. */
    int64_t saibi = 0;
    int64_t sai2 = 0;
    int64_t sbi2 = 0;
    size_t i;
    for (i = 0; i < CHAR_N_VALUES; i++) {
        saibi += ref[i] * key[i];
        sai2 += ref[i] * ref[i];
        sbi2 += key[i] * key[i];
    }
    /* An all-zero vector (empty string) has no direction; avoid the 0/0 NaN
     * the original produced. Two empty vectors are treated as identical. */
    if (sai2 == 0 || sbi2 == 0) {
        return (sai2 == sbi2) ? 1.0 : 0.0;
    }
    double cos_theta = (double)saibi / sqrt((double)sai2 * (double)sbi2);
    /* Clamp against floating-point rounding so acos() never sees a value
     * outside its [-1, 1] domain (which would yield NaN). */
    if (cos_theta > 1.0) cos_theta = 1.0;
    if (cos_theta < -1.0) cos_theta = -1.0;
    return 1.0 - (2 * acos(cos_theta) / M_PI);
}
/* Low-cost filter functions */
/* Heuristic boost added to the raw cosine similarity: an inverted parabola
 * peaking at s == 0.5 with value 0.33 and falling off toward the extremes. */
static inline double
cossim_correction(const double s)
{
    const double centred = s - 0.5;
    return 0.33 - centred * centred;
}
/* Cheap filter: only run the expensive LCS scoring if the (boosted) cosine
 * similarity of the popcount vectors could clear the threshold. `str` is
 * unused because its popcount is already cached in ctx->pop. */
static inline bool
should_grp_score_cos(const struct strgrp *const ctx,
        struct strgrp_grp *const grp, const char *const str) {
    const double raw = strcossim(ctx->pop, grp->pop);
    const double boosted = raw + cossim_correction(raw);
    return boosted >= ctx->threshold;
}
/* Cheap filter: an upper bound on the LCS score from lengths alone — the
 * LCS can be at most as long as the shorter string, so if even that bound
 * misses the threshold, skip the group. */
static inline bool
should_grp_score_len(const struct strgrp *const ctx,
        const struct strgrp_grp *const grp, const char *const str) {
    const double la = (double) strlen(str);
    const double lb = (double) grp->key_len;
    const double shorter = (la < lb) ? la : lb;
    const double bound = sqrt((2 * shorter * shorter) / (la * la + lb * lb));
    return bound >= ctx->threshold;
}
/* Scoring - Longest Common Subsequence[2]
*
* [2] https://en.wikipedia.org/wiki/Longest_common_subsequence_problem
*/
/* lcs() uses a two-row rolling DP table; cmi() maps (row i, column j) to a
 * flat, column-major index into it. */
#define ROWS 2
static inline int cmi(int i, int j) {
    return ROWS * j + i;
}
/* Length of the longest common subsequence of a and b, computed with a
 * two-row rolling DP table: O(len(a) * len(b)) time, O(len(b)) space.
 * Returns -1 if the scratch table cannot be allocated. */
static inline int16_t
lcs(const char *const a, const char *const b) {
    const int lb = strlen(b);
    const int lbp1 = lb + 1;
    int16_t *const lookup = calloc(ROWS * lbp1, sizeof(int16_t));
    if (!lookup) {
        return -1;
    }
    int ia, ib;
    /* Walk both strings back-to-front; row parity (ia & 1) selects the
     * current vs previous DP row, so only two rows are ever live. */
    for (ia = (strlen(a) - 1); ia >= 0; ia--) {
        const char iav = a[ia];
        const int ial = (ia + 1) & 1; // ia last
        for (ib = lb - 1; ib >= 0; ib--) {
            const char ibv = b[ib];
            const int iac = ia & 1; // ia current
            const int ibl = ib + 1; // ib last
            // don't need separate "ib current" as it's just ib
            if (iav == ibv) {
                lookup[cmi(iac, ib)] = 1 + lookup[cmi(ial, ibl)];
            } else {
                const int16_t valb = lookup[cmi(ial, ib)];
                const int16_t vabl = lookup[cmi(iac, ibl)];
                lookup[cmi(iac, ib)] = (valb > vabl) ? valb : vabl;
            }
        }
    }
    /* The answer for (ia=0, ib=0) lives at cmi(0, 0) == 0. */
    int16_t result = lookup[0];
    free(lookup);
    return result;
}
#undef ROWS
/* Normalised LCS similarity of a and b in [0, 1] (1 means equal strings).
 * Returns 0 if the underlying lcs() computation could not allocate. */
static inline double
nlcs(const char *const a, const char *const b) {
    const double lcss = lcs(a, b);
    /* lcs() signals allocation failure with -1; the original squared that
     * sentinel, fabricating a positive similarity from an error. */
    if (lcss < 0) {
        return 0.0;
    }
    const double la = (double) strlen(a);
    const double lb = (double) strlen(b);
    const double s = sqrt((2 * lcss * lcss) / (la * la + lb * lb));
    return s;
}
/* Full (expensive) similarity between a group's representative key and str:
 * the normalised longest-common-subsequence score in [0, 1]. */
static inline double
grp_score(const struct strgrp_grp *const grp, const char *const str) {
    return nlcs(grp->key, str);
}
/* Structure management */
/* Allocate a new item (string copy + payload) as a tal child of tctx.
 * Returns NULL on allocation failure. */
static struct strgrp_item *
new_item(tal_t *const tctx, const char *const str, void *const data) {
    struct strgrp_item *i = talz(tctx, struct strgrp_item);
    if (!i) {
        return NULL;
    }
    i->key = tal_strdup(i, str);
    /* tal_strdup can also fail; don't hand back an item with a NULL key
     * (tal_free returns NULL, matching this function's failure value). */
    if (!i->key) {
        return tal_free(i);
    }
    i->value = data;
    return i;
}
/* Append str (with payload) to the group's item list.
 * Returns false if the item could not be allocated. */
static bool
add_item(struct strgrp_grp *const ctx, const char *const str,
        void *const data) {
    struct strgrp_item *const item = new_item(ctx, str, data);
    if (!item) {
        return false;
    }
    darray_push(ctx->items, item);
    ctx->n_items++;
    return true;
}
/* tal destructor for a group: releases the darray's backing storage (the
 * items themselves are tal children and are freed with the group). */
static void
free_grp(struct strgrp_grp *grp) {
    darray_free(grp->items);
}
/* Allocate a new group keyed by str and seed it with (str, data) as its
 * first item. Returns NULL on any allocation failure. */
static struct strgrp_grp *
new_grp(tal_t *const tctx, const char *const str, void *const data) {
    struct strgrp_grp *b = talz(tctx, struct strgrp_grp);
    if (!b) {
        return NULL;
    }
    b->key = tal_strdup(b, str);
    /* Previously a failed tal_strdup left a NULL key that later strlen()/
     * scoring would dereference; bail out instead. */
    if (!b->key) {
        return tal_free(b);
    }
    b->key_len = strlen(str);
    b->n_items = 0;
    darray_init(b->items);
    tal_add_destructor(b, free_grp); // ensure darray storage is released
    if (!add_item(b, str, data)) {
        return tal_free(b);
    }
    return b;
}
/* Create a new group for str and register it with the context. The group's
 * popcount is copied from ctx->pop, which grp_for() filled for this string.
 * Returns NULL on allocation failure. */
static struct strgrp_grp *
add_grp(struct strgrp *const ctx, const char *const str,
        void *const data) {
    struct strgrp_grp *b = new_grp(ctx, str, data);
    if (!b) {
        return NULL;
    }
    /* Grow the scratch score array BEFORE publishing the group: the original
     * pushed/incremented first, so a failed (re)allocation left n_grps out
     * of sync with the scores array that grp_for() indexes. */
    if (ctx->scores) {
        if (!tal_resize(&ctx->scores, ctx->n_grps + 1)) {
            return NULL;
        }
    } else {
        ctx->scores = tal_arr(ctx, struct grp_score, ctx->n_grps + 1);
        if (!ctx->scores) {
            return NULL;
        }
    }
    memcpy(b->pop, ctx->pop, sizeof(ctx->pop));
    darray_push(ctx->grps, b);
    ctx->n_grps++;
    return b;
}
/* Create a grouping context. Strings added later join an existing group when
 * their similarity to its key is at least `threshold` (in [0, 1]).
 * Returns NULL on allocation failure. */
struct strgrp *
strgrp_new(const double threshold) {
    struct strgrp *ctx = talz(NULL, struct strgrp);
    /* talz returns NULL on allocation failure; the original dereferenced
     * the result unconditionally. */
    if (!ctx) {
        return NULL;
    }
    ctx->threshold = threshold;
    stringmap_init(ctx->known, NULL);
    darray_init(ctx->grps);
    return ctx;
}
/* Remember which group an exact string landed in, so future identical
 * strings skip the scoring pass entirely. */
static inline void
cache(struct strgrp *const ctx, struct strgrp_grp *const grp,
        const char *const str) {
    *(stringmap_enter(ctx->known, str)) = grp;
}
/* Find the best existing group for str, or NULL when no group scores at or
 * above the threshold. Side effect: fills ctx->pop with str's character
 * popcount, which add_grp() copies if a new group must be created. */
static struct strgrp_grp *
grp_for(struct strgrp *const ctx, const char *const str) {
    // Ensure ctx->pop is always populated. Returning null here indicates a new
    // group should be created, at which point add_grp() copies ctx->pop into
    // the new group's struct.
    strpopcnt(str, ctx->pop);
    if (!ctx->n_grps) {
        return NULL;
    }
    {
        // exact string seen before: reuse the cached group
        struct strgrp_grp **const grp = stringmap_lookup(ctx->known, str);
        if (grp) {
            return *grp;
        }
    }
    int i;
// Keep ccanlint happy in reduced feature mode
#if HAVE_OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
    // Score every group in parallel; the cheap length and cosine filters
    // gate the expensive LCS computation. Each iteration writes only its
    // own scores[i] slot, so the loop is data-race free.
    for (i = 0; i < ctx->n_grps; i++) {
        struct strgrp_grp *grp = darray_item(ctx->grps, i);
        ctx->scores[i].grp = grp;
        ctx->scores[i].score = 0;
        if (should_grp_score_len(ctx, grp, str)) {
            if (should_grp_score_cos(ctx, grp, str)) {
                ctx->scores[i].score = grp_score(grp, str);
            }
        }
    }
    // sequential scan for the best-scoring group
    struct grp_score *max = NULL;
    for (i = 0; i < ctx->n_grps; i++) {
        if (!max || ctx->scores[i].score > max->score) {
            max = &(ctx->scores[i]);
        }
    }
    return (max && max->score >= ctx->threshold) ? max->grp : NULL;
}
/* Public wrapper around grp_for(): return the best-matching group for str
 * (without inserting anything), or NULL if none clears the threshold. */
const struct strgrp_grp *
strgrp_grp_for(struct strgrp *const ctx, const char *const str) {
    return grp_for(ctx, str);
}
/* Insert str (with payload data) into the best-matching group, creating a
 * new group when nothing scores above the threshold. Returns the group the
 * string landed in, or NULL on allocation failure. */
const struct strgrp_grp *
strgrp_add(struct strgrp *const ctx, const char *const str,
        void *const data) {
    bool inserted = false;
    // grp_for() populates the ctx->pop memory. add_grp() copies this memory
    // into the strgrp_grp that it creates. It's assumed the ctx->pop memory
    // has not been modified between the grp_for() and add_grp() calls.
    struct strgrp_grp *pick = grp_for(ctx, str);
    if (pick) {
        inserted = add_item(pick, str, data);
    } else {
        pick = add_grp(ctx, str, data);
        inserted = (NULL != pick);
    }
    if (inserted) {
        assert(NULL != pick);
        // remember the exact string so identical inputs skip scoring
        cache(ctx, pick, str);
    }
    return pick;
}
/* Create an iterator over all groups in ctx (owned by ctx via tal).
 * Returns NULL on allocation failure. */
struct strgrp_iter *
strgrp_iter_new(struct strgrp *const ctx) {
    struct strgrp_iter *it = talz(ctx, struct strgrp_iter);
    if (it) {
        it->ctx = ctx;
        it->i = 0;
    }
    return it;
}
/* Advance the group iterator; returns NULL once all groups are consumed. */
const struct strgrp_grp *
strgrp_iter_next(struct strgrp_iter *const iter) {
    return (iter->ctx->n_grps == iter->i) ?
        NULL : darray_item(iter->ctx->grps, iter->i++);
}
/* Release a group iterator created by strgrp_iter_new(). */
void
strgrp_iter_free(struct strgrp_iter *const iter) {
    tal_free(iter);
}
/* Create an iterator over the items of one group (owned by grp via tal).
 * Returns NULL on allocation failure. */
struct strgrp_grp_iter *
strgrp_grp_iter_new(const struct strgrp_grp *const grp) {
    struct strgrp_grp_iter *it = talz(grp, struct strgrp_grp_iter);
    if (it) {
        it->grp = grp;
        it->i = 0;
    }
    return it;
}
/* Advance the item iterator; returns NULL once all items are consumed. */
const struct strgrp_item *
strgrp_grp_iter_next(struct strgrp_grp_iter *const iter) {
    return (iter->grp->n_items == iter->i) ?
        NULL : darray_item(iter->grp->items, iter->i++);
}
/* Release an item iterator created by strgrp_grp_iter_new(). */
void
strgrp_grp_iter_free(struct strgrp_grp_iter *iter) {
    tal_free(iter);
}
/* Representative key string of a group. */
const char *
strgrp_grp_key(const struct strgrp_grp *const grp) {
    return grp->key;
}
/* The string stored in an item. */
const char *
strgrp_item_key(const struct strgrp_item *const item) {
    return item->key;
}
/* The user payload stored alongside an item's string. */
void *
strgrp_item_value(const struct strgrp_item *const item) {
    return item->value;
}
/* Free the context and, via the tal hierarchy, every group and item. User
 * payloads are NOT freed; use strgrp_free_cb() to release those too. */
void
strgrp_free(struct strgrp *const ctx) {
    darray_free(ctx->grps);
    stringmap_free(ctx->known);
    tal_free(ctx);
}
/* Like strgrp_free(), but first invoke cb on every stored payload so the
 * caller can release the values it handed to strgrp_add(). */
void
strgrp_free_cb(struct strgrp *const ctx, void (*cb)(void *data)) {
    struct strgrp_grp **grp;
    struct strgrp_item **item;
    darray_foreach(grp, ctx->grps) {
        darray_foreach(item, (*grp)->items) {
            cb((*item)->value);
        }
    }
    strgrp_free(ctx);
}
/* Print one item's string, indented under its group heading. */
static void
print_item(const struct strgrp_item *item) {
    printf("\t%s\n", item->key);
}
/* Print a group's key followed by each of its member strings. */
static void
print_grp(const struct strgrp_grp *const grp) {
    struct strgrp_item **item;
    printf("%s:\n", grp->key);
    darray_foreach(item, grp->items) {
        print_item(*item);
    }
    printf("\n");
}
/* Dump every group and its member strings to stdout. */
void
strgrp_print(const struct strgrp *const ctx) {
    struct strgrp_grp **cursor;
    darray_foreach(cursor, ctx->grps) {
        print_grp(*cursor);
    }
}
|
PReLU.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/PReLU.c"
#else
/* Elementwise PReLU forward pass: out = in > 0 ? in : w * in.
 * nOutputPlane == 0 means a single weight shared by all elements; otherwise
 * one weight per channel (channel dim is 0 for 1-D input, 1 for batched). */
void THNN_(PReLU_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THTensor *weight,
    THIndex_t nOutputPlane)
{
    THTensor_(resizeAs)(output, input);
    if (nOutputPlane == 0)
    {
        // handle shared parameter case
        real w = *THTensor_(data)(weight);
        TH_TENSOR_APPLY2(real, output, real, input,
            *output_data = (*input_data > 0) ? *input_data : w*(*input_data);
        );
    }
    else
    {
        input = THTensor_(newContiguous)(input);
        // bs = batch size, ks = elements per channel per sample
        int64_t bs = 1, ks = 1;
        {
            int64_t input_ndim = THTensor_(nDimension)(input);
            // channel dimension: 0 for 1-D input, otherwise 1
            if (input->size[input_ndim > 1] != nOutputPlane)
                THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]);
            if (input_ndim > 1) {
                bs = input->size[0];
                for (int d = 2; d < input_ndim; d++) {
                    ks *= input->size[d];
                }
            }
        }
        real *output_data = THTensor_(data)(output);
        real *input_data = THTensor_(data)(input);
        real *weight_data = THTensor_(data)(weight);
        THIndex_t i, j, k;
        // parallelize over the batch; each sample walks its channels serially
        #pragma omp parallel for private(j,k)
        for (i = 0; i < bs; ++i)
        {
            real* n_input_data = input_data + i*nOutputPlane*ks;
            real* n_output_data = output_data + i*nOutputPlane*ks;
            for (j = 0; j < nOutputPlane; ++j)
            {
                for (k = 0; k < ks; ++k)
                    n_output_data[k] = (n_input_data[k] > 0) ? n_input_data[k] : weight_data[j] * n_input_data[k];
                n_input_data += ks;
                n_output_data += ks;
            }
        }
        THTensor_(free)(input); // release the contiguous copy
    }
}
// PReLU backward pass w.r.t. the input:
//   gradInput = gradOutput            where input > 0
//   gradInput = w * gradOutput        otherwise
// using the shared weight (nOutputPlane == 0) or the per-channel one.
void THNN_(PReLU_updateGradInput)(
    THNNState *state,          // unused by this CPU implementation
    THTensor *input,
    THTensor *gradOutput,      // must have as many elements as input
    THTensor *gradInput,       // resized to match input
    THTensor *weight,
    THIndex_t nOutputPlane)    // 0 => single shared weight
{
    THNN_CHECK_NELEMENT(input, gradOutput);
    THTensor_(resizeAs)(gradInput, input);
    if (nOutputPlane == 0)
    {
        // Shared-parameter case: one slope for every element.
        real w = THTensor_(data)(weight)[0];
        TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
            if ((*input_data) > 0)
                *gradInput_data = *gradOutput_data;
            else
                *gradInput_data = w * (*gradOutput_data);
        );
    }
    else
    {
        // Per-channel weights: raw-pointer iteration over contiguous copies.
        input = THTensor_(newContiguous)(input);
        gradOutput = THTensor_(newContiguous)(gradOutput);
        weight = THTensor_(newContiguous)(weight);
        const real *input_data = THTensor_(data)(input);
        const real *gradOutput_data = THTensor_(data)(gradOutput);
        const real *weight_data = THTensor_(data)(weight);
        // NOTE(review): gradInput is used via raw pointers without a
        // newContiguous call -- it was just resizeAs'd above; confirm
        // that makes it contiguous in this code path.
        real *gradInput_data = THTensor_(data)(gradInput);
        int64_t bs = 1, ks = 1;   // batch size, elements per (sample, channel)
        {
            int64_t input_ndim = THTensor_(nDimension)(input);
            // Channel dim index: 0 for 1-d input, 1 otherwise (dim 0 = batch).
            if (input->size[input_ndim > 1] != nOutputPlane)
                THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]);
            if (input_ndim > 1) {
                bs = input->size[0];
                for (int d = 2; d < input_ndim; d++) {
                    ks *= input->size[d];
                }
            }
        }
        THIndex_t i, j, k;
        // Parallelize over the batch; j,k are loop-private scratch.
#pragma omp parallel for private(j,k)
        for (i = 0; i < bs; ++i)
        {
            const real *n_input_data = input_data + i*nOutputPlane*ks;
            const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;
            real *n_gradInput_data = gradInput_data + i*nOutputPlane*ks;
            for (j = 0; j < nOutputPlane; ++j)
            {
                real w = weight_data[j];
                for (k = 0; k < ks; ++k)
                {
                    if (n_input_data[k] > 0)
                        n_gradInput_data[k] = n_gradOutput_data[k];
                    else
                        n_gradInput_data[k] = n_gradOutput_data[k] * w;
                }
                n_input_data += ks;
                n_gradInput_data += ks;
                n_gradOutput_data += ks;
            }
        }
        // Release the contiguous copies / extra refs.
        THTensor_(free)(input);
        THTensor_(free)(gradOutput);
        THTensor_(free)(weight);
    }
}
// PReLU backward pass w.r.t. the weight(s):
//   gradWeight[j] += scale * sum over elements of channel j where
//                    input <= 0 of (gradOutput * input)
// (positive inputs contribute nothing, since d(output)/d(w) = 0 there).
void THNN_(PReLU_accGradParameters)(
    THNNState *state,          // unused by this CPU implementation
    THTensor *input,
    THTensor *gradOutput,      // must have as many elements as input
    THTensor *gradInput,       // unused in this implementation
    THTensor *weight,
    THTensor *gradWeight,      // accumulated into; must be contiguous
    THTensor *gradWeightBuf,   // unused in this implementation
    THTensor *gradWeightBuf2,  // unused in this implementation
    THIndex_t nOutputPlane,    // 0 => single shared weight
    accreal scale_)            // scaling factor applied to the gradient
{
    real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
    THNN_CHECK_NELEMENT(input, gradOutput);
    if (nOutputPlane == 0)
    {
        // Shared-parameter case: reduce over every element.
        real *gradWeight_data = THTensor_(data)(gradWeight);
        real sum = 0;
        TH_TENSOR_APPLY2(real, input, real, gradOutput,
            if ((*input_data) <= 0)
                sum += (*input_data) * (*gradOutput_data);
        );
        gradWeight_data[0] += scale * sum;
    }
    else
    {
        // Per-channel weights. (6 = 1-based argument index of gradWeight.)
        THArgCheck(THTensor_(isContiguous)(gradWeight), 6, "gradWeight needs to be contiguous");
        input = THTensor_(newContiguous)(input);
        gradOutput = THTensor_(newContiguous)(gradOutput);
        weight = THTensor_(newContiguous)(weight);
        int64_t bs = 1, ks = 1;   // batch size, elements per (sample, channel)
        {
            int64_t input_ndim = THTensor_(nDimension)(input);
            // Channel dim index: 0 for 1-d input, 1 otherwise (dim 0 = batch).
            if (input->size[input_ndim > 1] != nOutputPlane)
                THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]);
            if (input_ndim > 1) {
                bs = input->size[0];
                for (int d = 2; d < input_ndim; d++) {
                    ks *= input->size[d];
                }
            }
        }
        const real *input_data = THTensor_(data)(input);
        const real *gradOutput_data = THTensor_(data)(gradOutput);
        // NOTE(review): weight_data is never read below -- the weight
        // copy is made only to be freed; looks like dead code.
        const real *weight_data = THTensor_(data)(weight);
        real *gradWeight_data = THTensor_(data)(gradWeight);
        THIndex_t i, j, k;
        // Kept serial, presumably because gradWeight_data[j] is
        // accumulated across batch samples (a parallel-for over i
        // would race on it).
        for (i = 0; i < bs; ++i)
        {
            const real *n_input_data = input_data + i*nOutputPlane*ks;
            const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;
            for (j = 0; j < nOutputPlane; ++j)
            {
                real sum = 0;
                for (k = 0; k < ks; ++k)
                    if (n_input_data[k] <= 0)
                        sum += n_gradOutput_data[k] * n_input_data[k];
                gradWeight_data[j] += scale * sum;
                n_input_data += ks;
                n_gradOutput_data += ks;
            }
        }
        // Release the contiguous copies / extra refs.
        THTensor_(free)(input);
        THTensor_(free)(gradOutput);
        THTensor_(free)(weight);
    }
}
#endif
|
sieve.c | /*
* Adapted from: http://w...content-available-to-author-only...s.org/sieve-of-eratosthenes
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
/*
 * Count the primes in [2, n] with a sieve of Eratosthenes.
 *
 * Returns the number of primes <= n; returns 0 when n < 2 or when the
 * sieve array cannot be allocated.
 *
 * Fixes vs. the original:
 *  - the sieve array was leaked; it is now freed before returning;
 *  - the malloc() result is checked (the old code memset a possibly
 *    NULL pointer, and a negative n produced a bogus allocation size);
 *  - the outer loop is no longer an OpenMP "parallel for": running it
 *    in parallel made threads read prime[p] while other threads were
 *    concurrently writing prime[] (a data race / undefined behavior).
 *    The inner marking loop and the final count stay parallel;
 *  - marking starts at p*p instead of 2*p, since every smaller multiple
 *    of p was already crossed out by a smaller prime factor.
 */
int sieveOfEratosthenes(int n)
{
    if (n < 2)
        return 0;
    /* prime[i] remains true iff i is prime once sieving completes. */
    bool *prime = (bool*) malloc((n+1)*sizeof(bool));
    if (prime == NULL)
        return 0;
    memset(prime, true, (n+1)*sizeof(bool));
    int sqrt_n = (int) sqrt((double) n);
    /* Sequential over candidate primes: prime[p] must reflect all
     * sieving by factors < p before it is tested. */
    for (int p = 2; p <= sqrt_n; p++)
    {
        if (prime[p])
        {
            int i;
            /* Cross out the multiples of p; iterations are independent,
             * so this loop is safe to parallelize. */
            #pragma omp parallel for
            for (i = p*p; i <= n; i += p)
                prime[i] = false;
        }
    }
    /* Count survivors in parallel with a reduction. */
    int primes = 0;
    #pragma omp parallel for reduction(+:primes)
    for (int p = 2; p <= n; p++)
        if (prime[p])
            primes++;
    free(prime);
    return primes;
}
/* Driver: count the primes up to 10^8 and print the result. */
int main()
{
    int n = 100000000;  /* sieve limit (10^8) */
    printf("%d\n",sieveOfEratosthenes(n));
    return 0;
} |
GB_binop__bset_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__bset_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__bset_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int8)
// C=scalar+B GB (_bind1st__bset_int8)
// C=scalar+B' GB (_bind1st_tran__bset_int8)
// C=A+scalar GB (_bind2nd__bset_int8)
// C=A'+scalar GB (_bind2nd_tran__bset_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITSET (aij, bij, int8_t, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, int8_t, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT8 || GxB_NO_BSET_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; "+" is the BSET operator
// (GB_BITSET) hard-coded for int8_t.  The loop body lives in the
// included template.
GrB_Info GB (_Cdense_ewise3_noaccum__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of OpenMP threads the template may use
)
{
    #if GB_DISABLE
    // operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C with
// the BSET operator.
GrB_Info GB (_Cdense_accumB__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // parallel slicing of B's entries over B_ntasks tasks / B_nthreads threads
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C with the BSET
// operator.
//
// Cleanup: the generated code carried a second, unreachable
// "return (GrB_SUCCESS) ;" after the inner block (which already
// returns); the dead statement has been removed.  Behavior is
// unchanged.
GrB_Info GB (_Cdense_accumb__bset_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // points at the scalar b, of type int8_t
    const int nthreads          // number of OpenMP threads the template may use
)
{
    #if GB_DISABLE
    // operator compiled out via the GxB_NO_* flags (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the BSET operator.
// The heavy lifting (task scheduling, mask handling) is in the included
// template; this wrapper just declares the workspaces it expects.
GrB_Info GB (_AaddB__bset_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,                   // optional mask
    const bool Mask_struct,               // use M structurally (values ignored)
    const bool Mask_comp,                 // use the complemented mask !M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces consumed by GB_add_template.c and released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the BSET operator.
GrB_Info GB (_AemultB_01__bset_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,                   // optional mask
    const bool Mask_struct,               // use M structurally (values ignored)
    const bool Mask_comp,                 // use the complemented mask !M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#M> = A.*B where A is sparse/hyper and B is
// bitmap/full.  Because GB_BINOP_FLIP is set for BSET (it is not
// commutative), the flipxy flag selects between f(x,y) and f(y,x)
// via the GB_FLIPPED macro before including the template.
GrB_Info GB (_AemultB_02__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,                   // optional mask
    const bool Mask_struct,               // use M structurally (values ignored)
    const bool Mask_comp,                 // use the complemented mask !M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,                    // if true compute z = f(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full.
GrB_Info GB (_AemultB_03__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,                   // sparse/hyper mask
    const bool Mask_struct,               // use M structurally (values ignored)
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult, bitmap case: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// held as a bitmap.
GrB_Info GB (_AemultB_bitmap__bset_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,                   // optional mask
    const bool Mask_struct,               // use M structurally (values ignored)
    const bool Mask_comp,                 // use the complemented mask !M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = GB_BITSET (x, Bx [p]) for every entry present in B,
// with the scalar x bound as the first operand.
GrB_Info GB (_bind1st__bset_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // the scalar x, of type int8_t
    const GB_void *Bx_input,    // values of B
    const int8_t *restrict Bb,  // B's bitmap; presumably NULL when B is full
                                // (GBB treats that as all-present) -- confirm
    int64_t bnz,                // number of value slots to scan
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;   // skip slots absent from the bitmap
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITSET (x, bij, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = GB_BITSET (Ax [p], y) for every entry present in A,
// with the scalar y bound as the second operand.
GrB_Info GB (_bind2nd__bset_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // values of A
    const GB_void *y_input,     // the scalar y, of type int8_t
    const int8_t *restrict Ab,  // A's bitmap; presumably NULL when A is full
                                // (GBB treats that as all-present) -- confirm
    int64_t anz,                // number of value slots to scan
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;   // skip slots absent from the bitmap
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITSET (aij, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, int8_t, 8) ; \
}
// C = op (x, A'): transpose A and apply GB_BITSET with the scalar x
// bound first.  The per-entry work is done by GB_CAST_OP (defined just
// above) inside the included transpose template.
GrB_Info GB (_bind1st_tran__bset_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,               // the scalar x, of type int8_t
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE for code after this function
    // (generated-code idiom; redundant here since it is redefined to
    // the same type)
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, int8_t, 8) ; \
}
// C = op (A', y): transpose A and apply GB_BITSET with the scalar y
// bound second.  The per-entry work is done by GB_CAST_OP (defined just
// above) inside the included transpose template.
GrB_Info GB (_bind2nd_tran__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,               // the scalar y, of type int8_t
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
declare_variant_construct_codegen_1.c | // expected-no-diagnostics
#ifndef HEADER
#define HEADER
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CK1
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --check-prefix=CK1
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --check-prefix=CK1
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --check-prefix=CK1
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CK1
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
#ifdef CK1
#define N 100
void p_vxv(int *v1, int *v2, int *v3, int n);
void t_vxv(int *v1, int *v2, int *v3, int n);
#pragma omp declare variant(t_vxv) match(construct={target})
#pragma omp declare variant(p_vxv) match(construct={parallel})
// Base function: plain elementwise multiply v3 = v1 .* v2.  The declare
// variant pragmas above substitute t_vxv inside 'target' constructs and
// p_vxv inside 'parallel' constructs.
void vxv(int *v1, int *v2, int *v3, int n) {
  for (int i = 0; i < n; i++) v3[i] = v1[i] * v2[i];
}
// CK1: define dso_local void @vxv
// Variant selected inside 'parallel' constructs: worksharing loop that
// multiplies by 3 so the IR checks can tell it apart from the base.
void p_vxv(int *v1, int *v2, int *v3, int n) {
#pragma omp for
  for (int i = 0; i < n; i++) v3[i] = v1[i] * v2[i] * 3;
}
// CK1: define dso_local void @p_vxv
#pragma omp declare target
// Variant selected inside 'target' constructs: multiplies by 2 so the
// IR checks can tell it apart from the base and parallel variants.
// Fix: the pragma was written "#pragma distribute simd" -- missing the
// 'omp' sentinel -- so it was an unknown pragma rather than the
// intended OpenMP 'distribute simd' directive (and, under -verify with
// expected-no-diagnostics, a potential -Wunknown-pragmas failure).
void t_vxv(int *v1, int *v2, int *v3, int n) {
#pragma omp distribute simd
  for (int i = 0; i < n; i++) v3[i] = v1[i] * v2[i] * 2;
}
#pragma omp end declare target
// CK1: define dso_local void @t_vxv
// CK1-LABEL: define {{[^@]+}}@test
// Exercises variant selection: inside 'target teams' the call resolves
// to t_vxv, a plain call resolves to the base vxv, and inside
// 'parallel' it resolves to p_vxv.  The CK1 lines are FileCheck
// patterns matched against the generated LLVM IR.
int test(void) {
  int v1[N], v2[N], v3[N];
  // init
  for (int i = 0; i < N; i++) {
    v1[i] = (i + 1);
    v2[i] = -(i + 1);
    v3[i] = 0;
  }
#pragma omp target teams map(to: v1[:N],v2[:N]) map(from: v3[:N])
  {
    vxv(v1, v2, v3, N);
  }
  // CK1: call void @__omp_offloading_[[OFFLOAD:.+]]({{.+}})
  vxv(v1, v2, v3, N);
  // CK1: call void @vxv
#pragma omp parallel
  {
    vxv(v1, v2, v3, N);
  }
  // CK1: call void ({{.+}}) @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 3, void ({{.+}})* bitcast (void (i32*, i32*, [100 x i32]*, [100 x i32]*, [100 x i32]*)* [[PARALLEL_REGION:@.+]] to void
  return 0;
}
// CK1: define internal void @__omp_offloading_[[OFFLOAD]]({{.+}})
// CK1: call void ({{.+}}) @__kmpc_fork_teams(%struct.ident_t* {{.+}}, i32 3, void ({{.+}})* bitcast (void (i32*, i32*, [100 x i32]*, [100 x i32]*, [100 x i32]*)* [[TARGET_REGION:@.+]] to void
// CK1: define internal void [[TARGET_REGION]](
// CK1: call void @t_vxv
// CK1: define internal void [[PARALLEL_REGION]](
// CK1: call void @p_vxv
#endif // CK1
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CK2
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --check-prefix=CK2
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --check-prefix=CK2
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --check-prefix=CK2
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CK2
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -verify -fopenmp-simd -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK2 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
#ifdef CK2
void test_teams(int ***v1, int ***v2, int ***v3, int n);
void test_target(int ***v1, int ***v2, int ***v3, int n);
void test_parallel(int ***v1, int ***v2, int ***v3, int n);
#pragma omp declare variant(test_teams) match(construct = {teams})
#pragma omp declare variant(test_target) match(construct = {target})
#pragma omp declare variant(test_parallel) match(construct = {parallel})
// Base function: triple-nested elementwise multiply.  The declare
// variant pragmas above redirect calls to test_teams / test_target /
// test_parallel depending on the enclosing construct.
void test_base(int ***v1, int ***v2, int ***v3, int n) {
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; ++j)
      for (int k = 0; k < n; ++k)
        v3[i][j][k] = v1[i][j][k] * v2[i][j][k];
}
#pragma omp declare target
// Variant selected inside 'teams' constructs.
void test_teams(int ***v1, int ***v2, int ***v3, int n) {
#pragma omp distribute parallel for simd collapse(2)
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j)
      for (int k = 0; k < n; ++k)
        v3[i][j][k] = v1[i][j][k] * v2[i][j][k];
}
#pragma omp end declare target
#pragma omp declare target
// Variant selected inside 'target' constructs (without teams).
void test_target(int ***v1, int ***v2, int ***v3, int n) {
#pragma omp parallel for simd collapse(3)
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j)
      for (int k = 0; k < n; ++k)
        v3[i][j][k] = v1[i][j][k] * v2[i][j][k];
}
#pragma omp end declare target
// Variant selected inside 'parallel' constructs: worksharing loop only.
void test_parallel(int ***v1, int ***v2, int ***v3, int n) {
#pragma omp for collapse(3)
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j)
      for (int k = 0; k < n; ++k)
        v3[i][j][k] = v1[i][j][k] * v2[i][j][k];
}
// CK2-LABEL: define {{[^@]+}}@test
// Drives test_base under three constructs so each declare variant fires
// exactly once: target+teams -> test_teams, target -> test_target,
// parallel -> test_parallel.  The CK2 lines are FileCheck patterns
// matched against the generated LLVM IR.
void test(int ***v1, int ***v2, int ***v3, int n) {
  int i;   // NOTE(review): appears unused; possibly vestigial in the test
#pragma omp target
#pragma omp teams
  {
    test_base(v1, v2, v3, 0);
  }
  // CK2: call void @__omp_offloading_[[OFFLOAD_1:.+]]({{.+}})
#pragma omp target
  {
    test_base(v1, v2, v3, 0);
  }
  // CK2: call void @__omp_offloading_[[OFFLOAD_2:.+]]({{.+}})
#pragma omp parallel
  {
    test_base(v1, v2, v3, 0);
  }
  // CK2: call void ({{.+}}) @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32****, i32****, i32****)* [[PARALLEL_REGION:@.+]] to void
}
// CK2: define internal void @__omp_offloading_[[OFFLOAD_1]]({{.+}})
// CK2: call void ({{.+}}) @__kmpc_fork_teams(%struct.ident_t* {{.+}}, i32 3, void ({{.+}})* bitcast (void (i32*, i32*, i32****, i32****, i32****)* [[TARGET_REGION_1:@.+]] to void
// CK2: define internal void [[TARGET_REGION_1]](
// CK2: call void @test_teams
// CK2: define internal void @__omp_offloading_[[OFFLOAD_2]]({{.+}})
// CK2: call void @test_target
// CK2: define internal void [[PARALLEL_REGION]](
// CK2: call void @test_parallel
#endif // CK2
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -verify -fopenmp -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CK3
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --check-prefix=CK3
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --check-prefix=CK3
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --check-prefix=CK3
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CK3
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -verify -fopenmp-simd -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK3 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
#ifdef CK3
#define N 100
int t_for(int *v1, int *v2, int *v3, int n);
int t_simd(int *v1, int *v2, int *v3, int n);
#pragma omp declare variant(t_simd) match(construct = {simd})
#pragma omp declare variant(t_for) match(construct = {for})
// Base function: product of the idx-th elements (v3 is unused).  The
// declare variant pragmas above redirect calls to t_simd inside 'simd'
// constructs and to t_for inside 'for' constructs.
int t(int *v1, int *v2, int *v3, int idx) {
  return v1[idx] * v2[idx];
}
// Variant selected inside 'for' constructs: same elementwise product as
// the base function (v3 is unused).
int t_for(int *v1, int *v2, int *v3, int idx) {
  const int lhs = v1[idx];
  const int rhs = v2[idx];
  return lhs * rhs;
}
// Variant selected inside 'simd' constructs; 'declare simd' lets the
// compiler emit vectorized entry points for it.
#pragma omp declare simd
int t_simd(int *v1, int *v2, int *v3, int idx) {
  return v1[idx] * v2[idx];
}
// CK3-LABEL: define {{[^@]+}}@test
// Drives t() once under a 'simd' construct (resolving to t_simd) and
// once under a 'for' construct (resolving to t_for).  The CK3 lines
// are FileCheck patterns matched against the generated LLVM IR.
void test(void) {
  int v1[N], v2[N], v3[N];
  // init
  for (int i = 0; i < N; i++) {
    v1[i] = (i + 1);
    v2[i] = -(i + 1);
    v3[i] = 0;
  }
#pragma omp simd
  for (int i = 0; i < N; i++) {
    v3[i] = t(v1, v2, v3, i);
  }
  // CK3: call = call i32 @t_simd
#pragma omp for
  for (int i = 0; i < N; i++) {
    v3[i] = t(v1, v2, v3, i);
  }
  // CK3: call{{.+}} = call i32 @t_for
}
#endif // CK3
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --check-prefix=CK4
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --check-prefix=CK4
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --check-prefix=CK4
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --check-prefix=CK4
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CK4
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -verify -fopenmp-simd -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -x c -triple x86_64-unknown-linux -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -x c -triple x86_64-unknown-linux -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCK4 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-unknown-linux -fopenmp-targets=amdgcn-amd-amdhsa -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
#ifdef CK4
#define N 100

// Prototypes for the variant implementations; selection depends on how many
// trailing constructs of the surrounding context the selector matches.
void not_selected_vxv(int *v1, int *v2, int *v3, int n);
void combined_vxv(int *v1, int *v2, int *v3, int n);
void all_vxv(int *v1, int *v2, int *v3, int n);

#pragma omp declare variant(all_vxv) match(construct={target,teams,parallel,for,simd})
#pragma omp declare variant(combined_vxv) match(construct={target,teams,parallel,for})
#pragma omp declare variant(not_selected_vxv) match(construct={parallel,for})
void vxv(int *v1, int *v2, int *v3, int n) {
  for (int i = 0; i < n; i++) v3[i] = v1[i] * v2[i];
}

void not_selected_vxv(int *v1, int *v2, int *v3, int n) {
  for (int i = 0; i < n; i++) v3[i] = v1[i] * v2[i] * 3;
}

// Variants called from target regions must be declared on the device too.
#pragma omp declare target
void combined_vxv(int *v1, int *v2, int *v3, int n) {
  for (int i = 0; i < n; i++) v3[i] = v1[i] * v2[i] * 2;
}
#pragma omp end declare target

#pragma omp declare target
void all_vxv(int *v1, int *v2, int *v3, int n) {
  for (int i = 0; i < n; i++) v3[i] = v1[i] * v2[i] * 4;
}
#pragma omp end declare target

// CK4-LABEL: define {{[^@]+}}@test
void test(void) {
  int v1[N], v2[N], v3[N];

  //init
  for (int i = 0; i < N; i++) {
    v1[i] = (i + 1);
    v2[i] = -(i + 1);
    v3[i] = 0;
  }

  // target+teams+parallel+for context: combined_vxv is the best match.
#pragma omp target teams map(to: v1[:N],v2[:N]) map(from: v3[:N])
  {
#pragma omp parallel for
    for (int i = 0; i < N; i++)
      vxv(v1, v2, v3, N);
  }
  // CK4: call void @__omp_offloading_[[OFFLOAD_1:.+]]({{.+}})

  // A bare `simd` matches none of the selectors: the base vxv is called.
#pragma omp simd
  for (int i = 0; i < N; i++)
    vxv(v1, v2, v3, N);
  // CK4: call void @vxv

  // Combined construct ending in simd: all_vxv is the best match.
#pragma omp target teams distribute parallel for simd map(from: v3[:N])
  for (int i = 0; i < N; i++)
    for (int i = 0; i < N; i++)
      for (int i = 0; i < N; i++)
        vxv(v1, v2, v3, N);
  // CK4: call void @__omp_offloading_[[OFFLOAD_2:.+]]({{.+}})
}
// CK4-DAG: call void @all_vxv
// CK4-DAG: call void @combined_vxv
#endif // CK4
#endif // HEADER
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
  Define declarations.
*/
/*
  Single-byte opcodes that stand in for multi-character operators after
  AcquireFxInfo() rewrites the expression string; the values sit above the
  printable ASCII range so they cannot collide with expression text.
*/
#define LeftShiftOperator 0xf5U
#define RightShiftOperator 0xf6U
#define LessThanEqualOperator 0xf7U
#define GreaterThanEqualOperator 0xf8U
#define EqualOperator 0xf9U
#define NotEqualOperator 0xfaU
#define LogicalAndOperator 0xfbU
#define LogicalOrOperator 0xfcU
#define ExponentialNotation 0xfdU
/*
  Evaluation context for an fx expression; built by AcquireFxInfo() and torn
  down by DestroyFxInfo().
*/
struct _FxInfo
{
  const Image
    *images;        /* image sequence the expression is evaluated against */

  char
    *expression;    /* pre-processed expression (compacted, operators encoded) */

  FILE
    *file;          /* stream for diagnostic output (stderr by default) */

  SplayTreeInfo
    *colors,        /* cache of parsed color values, keyed by name */
    *symbols;       /* cache of symbol/variable values, keyed by name */

  CacheView
    **view;         /* one virtual cache view per image in the sequence */

  RandomInfo
    *random_info;   /* random source; presumably for rand()-style fx
                       operators -- confirm in the evaluator */

  ExceptionInfo
    *exception;     /* private exception sink owned by this context */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  /*
    Splay trees cache parsed colors and symbol values between lookups; the
    color tree frees values with the aligned-memory deallocator.
  */
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishAlignedMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual cache view per image in the sequence, indexed in list order.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  /*
    Force right-to-left associativity for unary negation.  NOTE: the order of
    these rewrites is significant -- the '^-1.0*', 'E-1.0*' and 'e-1.0*'
    passes undo the blanket '-' rewrite for exponents and scientific
    notation, so they must run after it.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert compound to simple operators: each two-character operator is
    replaced by a single-byte opcode (see the defines at the top of this
    file) so the evaluator can dispatch on one character.
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  noise_image=AccelerateAddNoiseImage(image,noise_type,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.  Each worker thread draws from its own RandomInfo
    (indexed by thread id) so the generators are not shared across threads.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Parallelize only when the generator still has the default secret key
    (~0UL); presumably a user-specified seed demands a single deterministic
    random stream -- confirm against GetRandomSecretKey() semantics.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another row failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;  /* channel absent in one of the images */
        /*
          Copy-trait channels and write-masked pixels pass through unchanged.
        */
        if (((noise_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) == 0))
          {
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize progress updates across worker threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another row failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        First pass: blend each channel toward factor times the minimum of
        the R/G/B channels.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      /*
        Second pass: blend the result toward factor times the maximum of
        the R/G/B channels, muting the colors overall.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize progress updates across worker threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image,
    *copy_image,
    *outline_image;

  /*
    Simulate a charcoal drawing: edge-detect a copy of the image, soften the
    outline with a blur, then normalize, negate, and convert to grayscale.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  copy_image=CloneImage(image,0,0,MagickTrue,exception);
  if (copy_image == (Image *) NULL)
    return((Image *) NULL);
  outline_image=EdgeImage(copy_image,radius,exception);
  copy_image=DestroyImage(copy_image);
  if (outline_image == (Image *) NULL)
    return((Image *) NULL);
  blur_image=BlurImage(outline_image,radius,sigma,exception);
  outline_image=DestroyImage(outline_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(blur_image,exception);
  (void) NegateImage(blur_image,MagickFalse,exception);
  (void) GrayscaleImage(blur_image,image->intensity,exception);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  if (blend == (const char *) NULL)
    return(colorize_image);
  /*
    Parse the blend geometry: rho/sigma/xi/psi map to per-channel blend
    percentages; a single value applies to all channels.
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      /* CMYK carries an extra channel: psi is black, chi is alpha. */
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.  Pixels of colorize_image are modified in
    place, so an authentic (writable) cache view is required; a virtual view
    is for read-only access.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits=GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        /* copy-trait channels and write-masked pixels are left untouched */
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(colorize_image,q) == 0))
          continue;
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize progress updates across worker threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Mutliply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
/* FUTURE: modify to make use of a MagickMatrix Mutliply function
   That should be provided in "matrix.c"
   (ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    height;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant; entries
    beyond the 6x6 window are ignored.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  /*
    The row count of the matrix to apply is loop invariant: compute it once
    here instead of once per pixel.
  */
  height=color_matrix->height > 6 ? 6UL : color_matrix->height;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      GetPixelInfoPixel(image,p,&pixel);
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;

        /*
          Each output channel is a weighted sum of R/G/B (plus K and alpha
          when present) with a constant offset in column 5.
        */
        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize progress updates across worker threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
%      FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    image_index;

  /*
    Release every resource owned by the FxInfo structure (exception,
    expression string, symbol and color splay-trees, one cache view per
    image, random info), then the structure itself.  Always returns NULL
    so callers can clear their pointer in one assignment.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  image_index=(ssize_t) GetImageListLength(fx_info->images)-1;
  for ( ; image_index >= 0; image_index--)
    fx_info->view[image_index]=DestroyCacheView(fx_info->view[image_index]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
%      double FxEvaluateChannelExpression(FxInfo *fx_info,
%        const PixelChannel channel,const ssize_t x,const ssize_t y,
%        double *alpha,ExceptionInfo *exception)
%      double FxEvaluateExpression(FxInfo *fx_info,
%        double *alpha,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  FxChannelStatistics() returns the requested per-image statistic (depth,
  kurtosis, maxima, mean, minima, skewness, or standard_deviation) for the
  given channel, scaled by QuantumScale.  An optional ".channel" suffix on
  the symbol (e.g. "mean.r") selects a specific channel.  Results are
  cached in fx_info->symbols keyed by image pointer, channel, and symbol so
  repeated references do not recompute image statistics.
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent],
    statistic[MagickPathExtent];

  const char
    *value;

  register const char
    *p;

  channel_mask=UndefinedChannel;
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      /*
        Parse the ".channel" suffix and temporarily restrict the image's
        channel mask to that channel; the previous mask is restored below.
      */
      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType) (1 << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    {
      /*
        Cache hit: restore the channel mask and return the cached value.
      */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*StringToDouble(value,(char **) NULL));
    }
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  /*
    Initialize statistic so that an unrecognized symbol yields 0.0 instead
    of reading uninitialized stack memory in the StringToDouble() call
    below (and caching garbage in the symbol table).
  */
  *statistic='\0';
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double)
        depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%g",
        standard_deviation);
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,size_t *,double *,ExceptionInfo *);
/*
  FxGCD() returns the greatest common divisor of alpha and beta using the
  iterative Euclidean algorithm (equivalent to the recursive form
  gcd(beta, alpha mod beta); gcd(alpha, 0) == alpha).
*/
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
/*
  FxSubexpression() advances past a parenthesized subexpression: it scans
  forward tracking nesting depth and returns a pointer to the ')' that
  closes the first '(' it saw.  If the string ends before that ')' it
  reports an unbalanced-parenthesis error and returns a pointer to the
  terminating NUL.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *scan;

  register ssize_t
    nesting;

  nesting=0;
  for (scan=expression; *scan != '\0'; scan++)
  {
    /* stop at the ')' that would bring depth 1 back to 0 */
    if ((nesting == 1) && (*scan == ')'))
      break;
    if (*scan == '(')
      nesting++;
    else
      if (*scan == ')')
        nesting--;
  }
  if (*scan == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(scan);
}
/*
  FxGetSymbol() resolves a symbol reference inside an fx expression -- an
  image selector (s, u, v, u[n]), a pixel reference (p{x,y} absolute or
  p[dx,dy] relative), a named color, or a per-pixel/per-image attribute
  such as r, g, b, hue, mean, page.width -- and returns its value as a
  double.  Channel values are scaled by QuantumScale into [0,1].  On an
  unparseable symbol it raises OptionError and returns 0.0.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,
  ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MagickPathExtent],
    symbol[MagickPathExtent];

  const char
    *p,
    *value;

  Image
    *image;

  PixelInfo
    pixel;

  double
    alpha,
    beta;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    depth,
    length,
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  depth=0;
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /*
    A single-letter prefix (i.e. the second character is not a letter)
    may select the source image and/or the pixel coordinate for the
    symbol that follows.
  */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              /* 's': the current image in the sequence. */
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;  /* first image in the sequence */
            case 'v': i=1; break;  /* second image in the sequence */
          }
          p++;
          if (*p == '[')
            {
              /*
                u[expr]: copy the bracketed (possibly nested) text into
                subexpression and evaluate it to get an explicit image
                index.
              */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              i=(ssize_t) alpha;
              p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              /*
                p{x,y}: absolute pixel coordinate; the comma expression
                yields x in alpha and y in beta.
              */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              p++;
            }
          else
            if (*p == '[')
              {
                /*
                  p[dx,dy]: coordinate relative to the current pixel.
                */
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  &depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                p++;
              }
          if (*p == '.')
            p++;
        }
    }
  /*
    Wrap the image index into [0,length) so negative or out-of-range
    indices address the image list modulo its length.
  */
  length=GetImageListLength(fx_info->images);
  while (i < 0)
    i+=(ssize_t) length;
  if (length != 0)
    i%=length;
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  GetPixelInfo(image,&pixel);
  /*
    Sample the (possibly fractional) coordinate with the image's
    interpolation method.
  */
  (void) InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];

      /*
        The remaining text may name a color (e.g. "red.g"): strip any
        trailing ".channel" suffix, then look the color up -- first in the
        per-expression color cache, else via QueryColorCompliance(),
        caching a hit for next time.
      */
      (void) CopyMagickString(name,p,MagickPathExtent);
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            {
              MagickBooleanType
                status;

              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,ConstantString(
                    name),ClonePixelInfo(&pixel));
                  p+=strlen(name);
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /*
        Bare pixel reference (e.g. "p[-1,-1]"): return the value of the
        channel currently being evaluated.
      */
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case IndexPixelChannel:
          return(0.0);
        case IntensityPixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Dispatch on the first character of the symbol; within each case,
    longer names are matched before single-letter channel shorthands.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /*
            channel(r,g,b,...): the geometry argument supplies one value
            per channel; return the one for the channel being evaluated.
            NOTE(review): psi/chi are swapped between the CMYK branch
            (black=psi, alpha=chi) and the RGB branch (black=chi,
            alpha=psi) -- confirm this asymmetry is intentional.
          */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);  /* cyan shares storage with red */
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return(image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      /* image.<statistic>: strip the "image." prefix (6 chars). */
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return(x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return(y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          /* Rec.709 luma weights */
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);  /* magenta shares green storage */
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return(GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return(image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return(image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return(image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return(image->page.y);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return(image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return(GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return(image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);  /* yellow shares blue storage */
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double)GetImageDepth(image, fx_info->exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a built-in symbol: try user-defined variables in the symbol table.
  */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans an expression and returns a pointer to the
  operator at which the expression should be split for evaluation: the
  LAST (for left-to-right operators) or FIRST (for right-to-left
  operators) occurrence of the lowest-precedence operator at nesting
  level 0.  Returns NULL when no splittable operator is found (i.e. the
  expression is a single operand or function call).
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  /*
    Precedence levels, lowest enumerator = binds tightest; the scan keeps
    the HIGHEST value seen, which is the loosest-binding operator.
  */
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  /* c holds the previous significant character (0 at start of scan). */
  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    precedence=UndefinedPrecedence;
    /* skip whitespace; '@' quotes (escapes) the character after it */
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over tokens whose characters would otherwise be mistaken for
      operators (the '2' in atan2, hex literals, scientific notation).
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
        /*
          NOTE(review): no break here -- an unmatched 'E'/'e' falls
          through into the 'J' case below.  The j0/j1 comparisons cannot
          match an 'e' token so this is harmless, but confirm intent.
        */
      }
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* '#' introduces a hex color literal: skip its hex digits. */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    /* track {...} and [...] nesting via the PREVIOUS character */
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        /*
          NOTE(review): the default label sits mid-switch (legal C); it
          detects implicit multiplication such as "2x" or ")(".
        */
        default:
        {
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",(int) ((unsigned char) c)) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* only binary +/-: unary sign after an operator is skipped */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    /* parenthesized groups are opaque: jump to their closing ')' */
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,size_t *depth,double *beta,ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
char
*q,
subexpression[MagickPathExtent];
double
alpha,
gamma;
register const char
*p;
*beta=0.0;
if (exception->severity >= ErrorException)
return(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
return(0.0);
*subexpression='\0';
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) (~(size_t) *beta);
return(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,
beta,exception));
return(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(alpha/(*beta));
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=fabs(floor((*beta)+0.5));
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha-(*beta));
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
return(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
return(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
return(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
return(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
return(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
return(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent);
q=subexpression;
p=StringToken(":",&q);
if (q == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth,beta,
exception);
return(gamma);
}
case '=':
{
char
numeric[MagickPathExtent];
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
(void) FormatLocaleString(numeric,MagickPathExtent,"%g",*beta);
(void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
(void) AddValueToSplayTree(fx_info->symbols,ConstantString(
subexpression),ConstantString(numeric));
return(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
return(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
(*depth)++;
if (*depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
(void) CopyMagickString(subexpression,expression+1,MagickPathExtent);
subexpression[strlen(subexpression)-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
beta,exception);
(*depth)--;
return(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
exception);
return(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
exception);
return(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
exception);
return((~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (LocaleNCompare(expression,"abs",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (LocaleNCompare(expression,"acosh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(acosh(alpha));
}
#endif
if (LocaleNCompare(expression,"acos",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"airy",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
if (alpha == 0.0)
return(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
return(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (LocaleNCompare(expression,"asinh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(asinh(alpha));
}
#endif
if (LocaleNCompare(expression,"asin",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(asin(alpha));
}
if (LocaleNCompare(expression,"alt",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"atan2",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (LocaleNCompare(expression,"atanh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(atanh(alpha));
}
#endif
if (LocaleNCompare(expression,"atan",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(expression,"ceil",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(ceil(alpha));
}
if (LocaleNCompare(expression,"clamp",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
if (alpha < 0.0)
return(0.0);
if (alpha > 1.0)
return(1.0);
return(alpha);
}
if (LocaleNCompare(expression,"cosh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(cosh(alpha));
}
if (LocaleNCompare(expression,"cos",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(expression,"debug",5) == 0)
{
const char
*type;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
if (fx_info->images->colorspace == CMYKColorspace)
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="opacity"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
else
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="opacity"; break;
default: type="unknown"; break;
}
(void) CopyMagickString(subexpression,expression+6,MagickPathExtent);
if (strlen(subexpression) > 1)
subexpression[strlen(subexpression)-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
return(0.0);
}
if (LocaleNCompare(expression,"drc",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
return(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (LocaleNCompare(expression,"erf",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(erf(alpha));
}
#endif
if (LocaleNCompare(expression,"exp",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
return(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (LocaleNCompare(expression,"floor",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(floor(alpha));
}
break;
}
case 'G':
case 'g':
{
if (LocaleNCompare(expression,"gauss",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
return(gamma);
}
if (LocaleNCompare(expression,"gcd",3) == 0)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
return(gcd);
}
if (LocaleCompare(expression,"g") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleCompare(expression,"hue") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"hypot",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(expression,"intensity") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"int",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(floor(alpha));
}
if (LocaleNCompare(expression,"isnan",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(!!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (LocaleNCompare(expression,"j0",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
beta,exception);
return(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"j1",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
beta,exception);
return(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"jinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
if (alpha == 0.0)
return(1.0);
gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha));
return(gamma);
}
#endif
break;
}
case 'L':
case 'l':
{
if (LocaleNCompare(expression,"ln",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
beta,exception);
return(log(alpha));
}
if (LocaleNCompare(expression,"logtwo",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
beta,exception);
return(log10(alpha))/log10(2.0);
}
if (LocaleNCompare(expression,"log",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
return(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (LocaleNCompare(expression,"max",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (LocaleNCompare(expression,"min",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(alpha < *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"mod",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
gamma=alpha-floor((alpha/(*beta)))*(*beta);
return(gamma);
}
if (LocaleCompare(expression,"m") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'N':
case 'n':
{
if (LocaleNCompare(expression,"not",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return((alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
return(1.0);
if (LocaleCompare(expression,"o") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
return(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
return(MagickPI);
if (LocaleNCompare(expression,"pow",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
return(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
return(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (LocaleNCompare(expression,"rand",4) == 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
return(alpha);
}
if (LocaleNCompare(expression,"round",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(floor(alpha+0.5));
}
if (LocaleCompare(expression,"r") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"sign",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(alpha < 0.0 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"sinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
if (alpha == 0)
return(1.0);
gamma=sin((MagickPI*alpha))/(MagickPI*alpha);
return(gamma);
}
if (LocaleNCompare(expression,"sinh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(sinh(alpha));
}
if (LocaleNCompare(expression,"sin",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(sin(alpha));
}
if (LocaleNCompare(expression,"sqrt",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(sqrt(alpha));
}
if (LocaleNCompare(expression,"squish",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
beta,exception);
return((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'T':
case 't':
{
if (LocaleNCompare(expression,"tanh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(tanh(alpha));
}
if (LocaleNCompare(expression,"tan",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
return(0.0);
if (LocaleNCompare(expression,"trunc",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
if (alpha >= 0.0)
return(floor(alpha));
return(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'W':
case 'w':
{
if (LocaleNCompare(expression,"while",5) == 0)
{
do
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth,beta,exception);
} while (fabs(alpha) >= MagickEpsilon);
return(*beta);
}
if (LocaleCompare(expression,"w") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
default:
break;
}
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
return(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Evaluate the fx expression for the gray channel at pixel (0,0); the
    numeric result is stored in *alpha.
  */
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  /*
    Parse the expression once with trace output suppressed: temporarily
    detach the debug file, evaluate at (0,0), then restore the file.
  */
  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=saved_file;
  return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    accumulator;

  size_t
    recursion_depth;

  /*
    Evaluate the full expression tree for one channel at (x,y); an
    OptionError recorded in the exception signals failure.
  */
  recursion_depth=0;
  accumulator=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,
    &recursion_depth,&accumulator,exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    n;

  ssize_t
    number_threads;

  /*
    Release every per-thread FxInfo, then the pointer array itself.
  */
  assert(fx_info != (FxInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (n=0; n < number_threads; n++)
  {
    if (fx_info[n] == (FxInfo *) NULL)
      continue;
    fx_info[n]=DestroyFxInfo(fx_info[n]);
  }
  return((FxInfo **) RelinquishMagickMemory(fx_info));
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  double
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Allocate one FxInfo per worker thread so each OpenMP thread can evaluate
    the expression without sharing mutable parser state.  Returns NULL (with
    the exception set where applicable) on any failure.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  /*
    An expression of the form "@filename" reads the expression from a file.
  */
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() can fail (e.g. unreadable file); bail out before
        handing a null expression to AcquireFxInfo()/DestroyString().
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /*
      Parse once up front so syntax errors surface immediately.
    */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    One FxInfo per worker thread; each holds its own parsed expression state.
  */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image: evaluate the expression once per channel of every pixel,
    each row potentially handled by a different OpenMP thread.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's private FxInfo */

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-trait channels and write-masked pixels pass through unchanged.
        */
        if (((fx_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) == 0))
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* expression result is normalized [0,1]; scale and clamp */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImage)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels imploded by the specified percentage.  It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is implode. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *image_view,
    *implode_view,
    *interpolate_view;

  Image
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  implode_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (implode_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  if (implode_image->background_color.alpha != OpaqueAlpha)
    implode_image->alpha_trait=BlendPixelTrait;
  /*
    Compute scaling factor: map the image onto a circle whose radius is half
    the shorter dimension, stretching the longer axis to compensate.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*image->columns;
  center.y=0.5*image->rows;
  radius=center.x;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      {
        scale.x=(double) image->rows/(double) image->columns;
        radius=center.y;
      }
  /*
    Implode image: each row may be processed by a different OpenMP thread.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,implode_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      if (GetPixelWriteMask(image,p) == 0)
        {
          /* write-masked pixels receive the background color */
          SetPixelBackgoundColor(implode_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(implode_image);
          continue;
        }
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* outside the implosion circle: copy the pixel verbatim */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        {
          PixelChannel channel=GetPixelChannelChannel(image,i);
          PixelTrait traits=GetPixelChannelTraits(image,channel);
          PixelTrait implode_traits=GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: sample the source at a radially displaced
            coordinate, pulling pixels toward (amount > 0) or pushing them
            away from (amount < 0) the center.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(image,interpolate_view,implode_image,
            method,(double) (factor*delta.x/scale.x+center.x),(double) (factor*
            delta.y/scale.y+center.y),q,exception);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImages method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;  /* was written before initialization in one branch */
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: replicate the sole frame number_frames times.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence: for every adjacent pair of frames, synthesize
    number_frames in-between frames by resizing and blending.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        beta ramps from ~0 to ~1 across the in-between frames; alpha is its
        complement.
      */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          /*
            Release the partially built sequence too; previously only
            morph_image was destroyed here, leaking morph_images.
          */
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /*
        Resize the next frame to the in-between geometry so the two can be
        blended pixel-for-pixel.
      */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(morph_image,i);
            PixelTrait traits=GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if (((morph_traits & CopyPixelTrait) != 0) ||
                (GetPixelWriteMask(morph_images,p) == 0))
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            /*
              Blend the current in-between frame (q) with the resized next
              frame (p).
            */
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphImages)
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const double pixel,const double noise)
{
  Quantum
    perturbed;

  /*
    Offset the pixel by a random amount scaled by noise, centered on the
    original value, and clamp the result into the quantum range.
  */
  perturbed=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  if (perturbed >= QuantumRange)
    return(QuantumRange);
  if (perturbed <= 0)
    return((Quantum) 0);
  return(perturbed);
}
/*
  PlasmaImageProxy() recursively subdivides the segment into quadrants while
  depth > 0, then perturbs the segment's edge and center pixels with random
  noise whose amplitude shrinks as attenuate grows with recursion depth.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *random_info,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /* degenerate (zero-area) segment: nothing to do */
  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  /* segment has collapsed to a single point: stop recursing */
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.  The noise amplitude is inversely
    proportional to attenuate, which grew with each recursion level.
  */
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      /*
        Left pixel: average the segment's top-left and bottom-left corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,i);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /*
        NOTE(review): this condition mixes x1-x_mid with y2-y_mid; the
        sibling guards compare like axes — possibly intended x2-x_mid.
        Confirm against upstream before changing.
      */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,i);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,i);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      /*
        Middle pixel: average the two opposite corners of the segment.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /* done when the segment is smaller than 3x3 pixels */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  /*
    Initialize the image with plasma fractal values via PlasmaImageProxy().
    Assert arguments before the first dereference and log the trace event
    once; previously image->debug was read before the assert and the event
    was logged twice.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
%     const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture: frame the image, optionally caption it,
    bend it with a wave, add a shadow, then rotate and trim.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Frame thickness: 1/25th of the longest side, but at least 10 pixels. */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        geometry[MagickPathExtent],
        *text;

      DrawInfo
        *annotate_info;

      ImageInfo
        *image_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      image_info=AcquireImageInfo();
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      text=InterpretImageProperties(image_info,(Image *) image,caption,
        exception);
      image_info=DestroyImageInfo(image_info);
      if (text == (char *) NULL)
        {
          /*
            Property interpretation failed; bail out before CloneString()
            and DestroyString() dereference a NULL caption (fixes a
            potential NULL-pointer fault in the original).
          */
          annotate_info=DestroyDrawInfo(annotate_info);
          caption_image=DestroyImage(caption_image);
          return((Image *) NULL);
        }
      (void) CloneString(&annotate_info->text,text);
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &text,exception);
      /* Grow the 1-row clone to hold every wrapped caption line. */
      status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)*
        (metrics.ascent-metrics.descent)+0.5),exception);
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image,exception);
          (void) CloneString(&annotate_info->text,text);
          (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info,exception);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      text=DestroyString(text);
    }
  /* Picture frame: original dimensions plus border and caption space. */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /* Bend: rotate 90 degrees, wave, rotate back, so the wave distorts the
     vertical edges of the picture. */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
  CacheView
    *image_view,
    *sepia_view;
  Image
    *sepia_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  /* Per-pixel writes require DirectClass; release the clone on failure. */
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;
      /* Map grayscale intensity to a warm triplet: red saturates above
         the threshold, green above 7/6*threshold, blue is darkened below
         threshold/6 — yielding the sepia tint. */
      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      /* Clamp green/blue to a floor of threshold/7 to avoid dead shadows. */
      tone=threshold/7.0;
      /* NOTE(review): these reads access the just-written destination
         pixel q through the SOURCE image's channel accessors; this is
         valid only because sepia_image is a clone of image with an
         identical channel layout — confirm if either image is remapped. */
      if ((double) GetPixelGreen(image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /* Stretch contrast of the toned result for a richer final image. */
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
  CacheView
    *image_view;
  ChannelType
    channel_mask;
  Image
    *border_image,
    *clone_image,
    *shadow_image;
  MagickBooleanType
    status;
  PixelInfo
    background_color;
  RectangleInfo
    border_info;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Work on a clone surrounded by a transparent border wide enough
     (2*sigma, rounded) to hold the blurred shadow edge. */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  /* "none" keeps the added frame fully transparent. */
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      /* Replace every pixel with the background color; its opacity is the
         source pixel's alpha scaled by the requested percentage, so the
         shadow follows the original silhouette. */
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /* Blur only the alpha channel to soften the shadow edge; restore the
     previous channel mask on the result afterwards. */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /* Shift the page geometry so the shadow lands at the requested offset
     relative to the original image, compensating for the added border. */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image: fill a double-sized canvas with random gray, motion-blur
    it along the given angle, edge-detect/negate it, then color-dodge and
    blend the result over the original image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;

      register ssize_t
        i;

      if (GetPixelWriteMask(random_image,q) == 0)
        {
          q+=GetPixelChannels(random_image);
          continue;
        }
      /* One random value per pixel, replicated to every channel. */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        /* Consistency fix: look channel/traits up on random_image (the
           image actually being written) rather than image; the result is
           identical here because random_image is a clone of image. */
        PixelChannel channel=GetPixelChannelChannel(random_image,i);
        PixelTrait traits=GetPixelChannelTraits(random_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image,exception);
  (void) NegateImage(dodge_image,MagickFalse,exception);
  /* Scale the dodge layer back down to the original geometry. */
  (void) TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  /* "20x80" are BlendCompositeOp weights — presumably 20% source vs. 80%
     destination; confirm against the compose documentation. */
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;
      /*
        Solarize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
      /* NOTE(review): control falls through to the pixel loop below even
         for PseudoClass images; confirm the effect is not applied twice
         once the colormap has been inverted. */
    }
  /*
    Solarize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      /* Skip pixels excluded by the write mask. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Invert any channel sample brighter than the threshold. */
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* Serialize progress updates across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove that the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
/* Extract bit i of a sample / set bit i of a sample to `set'. */
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"
  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.  c cycles the target
    channel (red/green/blue), j is the destination bit plane, i the
    watermark-intensity bit, and k the linear pixel cursor starting at
    image->offset.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /* Wrap the pixel cursor at the total pixel count (columns*rows);
           the original erroneously wrapped at columns*columns, which
           corrupts embedding for non-square images. */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: anaglyph composite with zero x/y offset between
    the left and right frames.
  */
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"
  const Image
    *image;
  Image
    *stereo_image;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(right_image != (const Image *) NULL);  /* NOTE(review): duplicate of the assert above */
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    register Quantum
      *magick_restrict r;
    /* Left image is sampled shifted by (-x_offset,-y_offset); virtual
       pixel handling covers out-of-bounds coordinates. */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
      /* NOTE(review): this break leaves status == MagickTrue, so a pixel
         cache failure yields a partially filled image — confirm intended. */
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /* Red from the left eye, green/blue from the right eye.  The pixel
         setters use `image' (left_image) accessors while writing into
         stereo_image's pixels r; valid only because stereo_image is a
         clone of left_image with an identical channel layout — confirm. */
      SetPixelRed(image,GetPixelRed(left_image,p),r);
      SetPixelGreen(image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(image,GetPixelBlue(right_image,q),r);
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
  CacheView
    *image_view,
    *interpolate_view,
    *swirl_view;
  Image
    *swirl_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  double
    radius;
  PointInfo
    center,
    scale;
  ssize_t
    y;
  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result is a same-size DirectClass clone of the source.
  */
  swirl_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  /*
    A non-opaque background requires alpha blending in the result.
  */
  if (swirl_image->background_color.alpha != OpaqueAlpha)
    swirl_image->alpha_trait=BlendPixelTrait;
  /*
    Compute scaling factor.
  */
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  /*
    scale maps the (possibly non-square) image onto a circle of 'radius',
    so the swirled region is the ellipse inscribed in the image bounds.
  */
  scale.x=1.0;
  scale.y=1.0;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      scale.x=(double) image->rows/(double) image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image: rows are processed independently; 'status' doubles as a
    cooperative abort flag shared by the worker threads.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,swirl_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;
    PointInfo
      delta;
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      if (GetPixelWriteMask(image,p) == 0)
        {
          /*
            Write-protected pixel: emit the background color instead.
          */
          SetPixelBackgoundColor(swirl_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(swirl_image);
          continue;
        }
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          register ssize_t
            i;
          /*
            Outside the swirl ellipse: copy the source pixel unchanged,
            channel by channel.
          */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,i);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            PixelTrait swirl_traits=GetPixelChannelTraits(swirl_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;
          /*
            Swirl the pixel: the rotation angle falls off quadratically
            from 'degrees' at the center to zero at the ellipse boundary.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(image,interpolate_view,swirl_image,
            method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),(double)
            ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,exception);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          Progress callbacks are serialized across threads.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A color value used for tinting.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
  CacheView
    *image_view,
    *tint_view;
  double
    intensity;
  GeometryInfo
    geometry_info;
  Image
    *tint_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelInfo
    color_vector;
  MagickStatusType
    flags;
  ssize_t
    y;
  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /*
    Tinting a gray image with a non-gray color promotes the result to sRGB.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /*
    Without a blend geometry there is nothing to tint: return the clone.
  */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.
  */
  GetPixelInfo(image,&color_vector);
  /*
    ParseGeometry maps the blend string onto rho/sigma/xi/psi, seeding the
    red/green/blue/alpha tint strengths (percent); a lone value applies to
    all channels.
  */
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      /*
        CMYK: psi drives black and chi (the fifth value) drives alpha.
      */
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /*
    Scale each strength by the tint color and recenter on its intensity,
    producing the per-channel offset vector applied below.
  */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;
      double
        weight;
      register ssize_t
        i;
      /*
        Pre-copy channels that must pass through untouched (copy-trait
        channels and write-masked pixels); the tinted values written by
        SetPixelViaPixelInfo below supply the rest.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait tint_traits=GetPixelChannelTraits(tint_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (tint_traits == UndefinedPixelTrait))
          continue;
        if (((tint_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) == 0))
          {
            SetPixelChannel(tint_image,channel,p[i],q);
            continue;
          }
      }
      /*
        Offset each channel by the color vector weighted with the midtone
        function f(x)=1-4*(x-1/2)^2: zero at black/white, maximal at 50%.
      */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(double) GetPixelRed(image,p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(double) GetPixelGreen(image,p)+color_vector.green*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(double) GetPixelBlue(image,p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(double) GetPixelBlack(image,p)+color_vector.black*(1.0-(4.0*
        (weight*weight)));
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          Progress callbacks are serialized across threads.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse_geometry[MagickPathExtent];
  DrawInfo
    *annotation;
  Image
    *blur,
    *canvas,
    *oval,
    *vignette;
  /*
    Sanity-check arguments.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Work on a blendable DirectClass copy so the source stays untouched.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->alpha_trait=BlendPixelTrait;
  /*
    Draw a white ellipse on black; once blurred it becomes the vignette mask.
  */
  oval=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,exception);
  if (oval == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,&oval->background_color,
    exception);
  (void) SetImageBackgroundColor(oval,exception);
  annotation=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&annotation->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&annotation->stroke,
    exception);
  (void) FormatLocaleString(ellipse_geometry,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  annotation->primitive=AcquireString(ellipse_geometry);
  (void) DrawImage(oval,annotation,exception);
  annotation=DestroyDrawInfo(annotation);
  /*
    Soften the mask edge and composite it as the canvas intensity/alpha.
  */
  blur=BlurImage(oval,radius,sigma,exception);
  oval=DestroyImage(oval);
  if (blur == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas,blur,IntensityCompositeOp,MagickTrue,0,0,
    exception);
  blur=DestroyImage(blur);
  /*
    Flatten onto the background color and restore the source colorspace.
  */
  vignette=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette != (Image *) NULL)
    (void) TransformImageColorspace(vignette,image->colorspace,exception);
  return(vignette);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
  CacheView
    *image_view,
    *wave_view;
  Image
    *wave_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  double
    *sine_map;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result is taller than the source by 2*|amplitude| so shifted rows
    stay inside the canvas.
  */
  wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  if (wave_image->background_color.alpha != OpaqueAlpha)
    wave_image->alpha_trait=BlendPixelTrait;
  /*
    Allocate sine map: one precomputed vertical offset per column, in the
    range [0, 2*|amplitude|].
  */
  sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (double *) NULL)
    {
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /*
    Samples that fall outside the source resolve to the background color.
  */
  (void) SetCacheViewVirtualPixelMethod(image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Each destination pixel samples the source at (x, y-sine_map[x]).
    */
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      status=InterpolatePixelChannels(image,image_view,wave_image,method,
        (double) x,(double) (y-sine_map[x]),q,exception);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          Progress callbacks are serialized across threads.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  image_view=DestroyCacheView(image_view);
  sine_map=(double *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive low-pass and high-pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void HatTransform(const float *pixels,const size_t stride,
  const size_t extent,const size_t scale,float *kernel)
{
  /*
    One pass of the a-trous "hat" smoothing filter along a line of samples:
    kernel[i]=0.25*(2*pixels[i]+pixels[i-scale]+pixels[i+scale]), with the
    out-of-range taps mirrored back into the line.  'stride' is the distance
    (in floats) between consecutive samples, so the same routine serves both
    row-wise (stride 1) and column-wise (stride == row length) filtering.
  */
  ssize_t
    i;

  /*
    Leading edge: the i-scale tap reflects to index scale-i.
  */
  for (i=0; i < (ssize_t) scale; i++)
    kernel[i]=0.25f*(2.0f*pixels[i*stride]+pixels[(scale-i)*stride]+
      pixels[(i+scale)*stride]);
  /*
    Interior: both taps are in range.
  */
  for ( ; i < (ssize_t) (extent-scale); i++)
    kernel[i]=0.25f*(2.0f*pixels[i*stride]+pixels[(i-scale)*stride]+
      pixels[(i+scale)*stride]);
  /*
    Trailing edge: the i+scale tap reflects to index 2*extent-2-(i+scale).
  */
  for ( ; i < (ssize_t) extent; i++)
    kernel[i]=0.25f*(2.0f*pixels[i*stride]+pixels[(i-scale)*stride]+
      pixels[(2*extent-2-(i+scale))*stride]);
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;
  float
    *kernel,
    *pixels;
  Image
    *noise_image;
  MagickBooleanType
    status;
  MagickSizeType
    number_pixels;
  MemoryInfo
    *pixels_info;
  ssize_t
    channel;
  /*
    Expected noise standard deviation per wavelet level.
  */
  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };
  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it is available and succeeds.
  */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Workspace: three image-sized float planes (the source plane plus two
    scratch planes that alternate as the low-pass destination), and one
    kernel row/column per OpenMP thread.
  */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns),
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  /*
    Denoise the red, green and blue channels independently.
  */
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;
    size_t
      high_pass,
      low_pass;
    ssize_t
      level,
      y;
    PixelChannel
      pixel_channel;
    PixelTrait
      traits;
    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    pixel_channel=GetPixelChannelChannel(image,channel);
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;
      ssize_t
        x;
      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;
      ssize_t
        x,
        y;
      /*
        The two scratch planes alternate as the low-pass destination.
      */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
      /*
        Separable hat filter, horizontal pass: one kernel row per thread.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();
        register float
          *magick_restrict p,
          *magick_restrict q;
        register ssize_t
          x;
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1 << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
      /*
        Vertical pass, in place on the low-pass plane.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();
        register float
          *magick_restrict p,
          *magick_restrict q;
        register ssize_t
          y;
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1 << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /*
          Detail coefficient = previous level minus the new low pass;
          soft-threshold it, then accumulate it back into plane 0.
        */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;
      register Quantum
        *magick_restrict q;
      register ssize_t
        x;
      ssize_t
        offset;
      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;
        /*
          Denoised value = accumulated details plus the coarsest low pass.
        */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          NOTE(review): progress is reported with AddNoiseImageTag rather
          than a wavelet-denoise tag -- looks copied; confirm intent.
        */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
|
estimate_time_step.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
//
// Main authors: Kazem Kamran
// Jordi Rubio
//
#if !defined(KRATOS_ESTIMATE_TIME_STEP )
#define KRATOS_ESTIMATE_TIME_STEP
// System includes
#include <string>
#include <iostream>
#include <algorithm>
#include <cmath> // Added by Jordi Rubio
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "utilities/geometry_utilities.h"
//#include "geometries/tetrahedra_3d_4.h"
#include "geometries/point.h"
#include "thermo_mechanical_application.h"
#include "includes/c2c_variables.h"
// #include "custom_conditions/environment_contact.h"
//#include "includes/variables.h"
#include "utilities/openmp_utils.h"
#include "includes/convection_diffusion_settings.h"
namespace Kratos
{
template<unsigned int TDim>
class EstimateTimeStep
{
public:
KRATOS_CLASS_POINTER_DEFINITION(EstimateTimeStep<TDim>);
//**********************************************************************************************
//**********************************************************************************************
//
double ComputeDt(ModelPart& ThisModelPart, const double dist_max, const double CFL, const double dt_min ,const double dt_max )
{
  KRATOS_TRY

  // Estimate the largest time step that keeps the interface-normal CFL
  // number at the requested value.  Only elements whose mean |DISTANCE|
  // is within dist_max of the free surface are considered.  The result is
  // clamped to [dt_min, dt_max] and synchronized across MPI ranks.
  const unsigned int NumNodes = TDim + 1;

  int NumThreads = OpenMPUtils::GetNumThreads();
  OpenMPUtils::PartitionVector ElementPartition;
  OpenMPUtils::DivideInPartitions(ThisModelPart.NumberOfElements(), NumThreads, ElementPartition);

  // One running maximum per thread; reduced serially afterwards.
  std::vector<double> MaxProj(NumThreads, 0.0);

  #pragma omp parallel shared(MaxProj)
  {
    int k = OpenMPUtils::ThisThread();
    ModelPart::ElementIterator ElemBegin = ThisModelPart.ElementsBegin() + ElementPartition[k];
    ModelPart::ElementIterator ElemEnd = ThisModelPart.ElementsBegin() + ElementPartition[k + 1];

    double& rMaxProj = MaxProj[k];

    double Area;
    array_1d<double, NumNodes> N;
    array_1d<double, NumNodes> dist_vec;
    BoundedMatrix<double, NumNodes, TDim> DN_DX;

    for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
    {
      // Mean absolute signed distance of the element's nodes.
      Geometry< Node<3> >& rGeom = itElem->GetGeometry();
      double ele_dist = 0.0;
      for (unsigned int kk = 0; kk < rGeom.size(); kk++)
      {
        double dist = rGeom[kk].FastGetSolutionStepValue(DISTANCE);
        dist_vec[kk] = dist;
        ele_dist += fabs(dist);
      }
      ele_dist /= NumNodes;

      if (ele_dist > dist_max)
        continue; // too far from the interface: does not limit dt

      GeometryUtils::CalculateGeometryData(rGeom, DN_DX, N, Area);

      // Elemental velocity, interpolated at the integration point.
      array_1d<double, TDim> ElementVel = N[0] * rGeom[0].FastGetSolutionStepValue(VELOCITY);
      for (unsigned int i = 1; i < NumNodes; ++i)
        noalias(ElementVel) += N[i] * rGeom[i].FastGetSolutionStepValue(VELOCITY);

      // Keep only the component normal to the interface (along the
      // distance gradient); a flat distance field yields zero velocity.
      array_1d<double, TDim> grad_dist = prod(trans(DN_DX), dist_vec);
      double norm_grad_dist = norm_2(grad_dist);
      double normal_speed = inner_prod(ElementVel, grad_dist);
      if (norm_grad_dist > 0.0)
      {
        normal_speed /= norm_grad_dist;
        noalias(ElementVel) = (normal_speed / norm_grad_dist) * grad_dist;
      }
      else
        noalias(ElementVel) = ZeroVector(TDim); // BUGFIX: was ZeroVector(3), wrong size for TDim == 2

      // |v . grad(N_i)| approximates |v|/h along the flow direction; its
      // maximum over the mesh bounds the CFL number.
      for (unsigned int i = 0; i < NumNodes; ++i)
      {
        double Proj = 0.0;
        for (unsigned int d = 0; d < TDim; ++d)
          Proj += ElementVel[d] * DN_DX(i, d);
        Proj = fabs(Proj);
        if (Proj > rMaxProj)
          rMaxProj = Proj;
      }
    }
  }

  // Obtain the maximum projected element size (compare thread results).
  double Max = 0.0;
  for (int k = 0; k < NumThreads; ++k)
    if (Max < MaxProj[k])
      Max = MaxProj[k];

  // Dt that yields the desired CFL; guard Max == 0 (no wet elements or a
  // velocity-free field) instead of dividing by zero, then clamp.
  double dt = (Max > 0.0) ? CFL / Max : dt_max;
  if (dt > dt_max)
    dt = dt_max;
  else if (dt < dt_min)
    dt = dt_min;

  // Perform MPI sync if needed: every rank advances with the global minimum.
  double global_dt = dt;
  dt = ThisModelPart.GetCommunicator().GetDataCommunicator().MinAll(global_dt);

  return dt;

  KRATOS_CATCH("")
}
/* Compute solidificatio nand cooling DT */
//*******************************************************************
//************ COMPUTE SOLIDIFICATION DT *******************
//*******************************************************************
double ComputeSolidificationCoolingDt(ModelPart& ThisModelPart,
const double solidification_percent,
const double max_cooling_delta_temp,
const double change_in_shrinkage,
const double limit_of_mushy_zone,
const bool improve_solidification_tracking,
const double dt_min,
const double dt_max)
{
KRATOS_TRY
// const int NumNodes = TDim +1;
// is_cold = 0;
const double current_dt = ThisModelPart.GetProcessInfo()[DELTA_TIME];
const int is_solidified = ThisModelPart.GetProcessInfo()[IS_SOLIDIFIED];
//const bool improve_solidification_tracking = true;
const double low_boundary_over_mushy_zone = 5.0;
int global_is_solidified = is_solidified;
// ThisModelPart.GetCommunicator().MinAll(global_is_solidified);
//**********************************
//**** NOT ALL NODES ARE LIQUID ****
//**********************************
if(global_is_solidified == 0)
{
//**********************************************************************
//**** CHECK IF ALL NODES ARE OVER LIQUIDUS TEMPERATURE ****
//**********************************************************************
//pre solidification Dt
int is_hot = CheckMaxTemperature(ThisModelPart);
if( is_hot == 1 )
{
//**************************************
//**** WHEN EVERYTHING IS LIQUID ****
//**************************************
// if so, then use a maximum cooling objective of 10.0
double max_presolodification_delta_tem = std::min(10.0,max_cooling_delta_temp);
int node_size = ThisModelPart.Nodes().size();
double max_delta_temp = 0.0;
std::vector<double> mdelta(OpenMPUtils::GetNumThreads(),0.0);
#pragma omp parallel for shared(mdelta)
for (int ii = 0; ii < node_size; ii++)
{
ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii;
double current_temp = it->FastGetSolutionStepValue(TEMPERATURE);
double old_temp = it->FastGetSolutionStepValue(TEMPERATURE,1);
// Get the Maximum on each thread
//max_delta_temp=std::max(-current_temp + old_temp,max_delta_temp);
double& md = mdelta[OpenMPUtils::ThisThread()];
md= std::max(-current_temp + old_temp, md);
}
//workaround because VS does not support omp 4.0
for (int i = 0; i < OpenMPUtils::GetNumThreads(); i++)
{
max_delta_temp = std::max(max_delta_temp, mdelta[i]);
}
max_delta_temp = ThisModelPart.GetCommunicator().GetDataCommunicator().MaxAll(max_delta_temp);
// Now we can keep on
if( max_delta_temp > 0.0 )
{
double new_delta_time = std::min(1.5, max_presolodification_delta_tem / max_delta_temp); //
new_delta_time *= current_dt;
// new_delta_time = 1.5*current_dt; // Previous transformationn
if( new_delta_time > dt_max)
new_delta_time = dt_max;
else if( new_delta_time < dt_min)
new_delta_time = dt_min;
return new_delta_time;
}
else
{
return current_dt;
}
}
else//solidification Dt
{
//**************************************
//**** WHEN NOT EVERYTHING IS LIQUID ****
//**************************************
double current_solidified_volume = 0.0;
double old_solidified_volume = 0.0;
double current_over_mushy_zone=0.0 ;
double old_over_mushy_zone=0.0;
double tot_vol = 0.0;
std::vector<double> mdelta(OpenMPUtils::GetNumThreads(),0.0);
//double max_delta_temp=0.0;
int node_size = ThisModelPart.Nodes().size();
#pragma omp parallel for reduction(+:current_solidified_volume,old_solidified_volume, current_over_mushy_zone, old_over_mushy_zone,tot_vol)
for (int ii = 0; ii < node_size; ii++)
{
// Now we look for the Solidifcation Volume
ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii;
double vol = it->GetValue(NODAL_VOLUME);
double& md= mdelta[OpenMPUtils::ThisThread()];
if (vol > md) { md = vol; }
double current_S = it->FastGetSolutionStepValue(SOLIDFRACTION);
double old_S = it->FastGetSolutionStepValue(SOLIDFRACTION,1);
if(current_S>=limit_of_mushy_zone) { current_over_mushy_zone+= vol; }
if(old_S>=limit_of_mushy_zone) { old_over_mushy_zone+= vol; }
current_solidified_volume += vol*current_S;
old_solidified_volume += vol*old_S;
tot_vol += vol;
//filling solidifiacation time
//double is_visited = it->FastGetSolutionStepValue(IS_VISITED);
//if(is_visited == 0.0 && current_S == 1.0){
// it->FastGetSolutionStepValue(IS_VISITED) = 1.0;
//double solid_time = ThisModelPart.GetProcessInfo()[TIME];
//double modulus = ThisModelPart.GetProcessInfo()[K0];
//it->FastGetSolutionStepValue(SOLIDIF_TIME) = solid_time;
//it->FastGetSolutionStepValue(SOLIDIF_MODULUS) = modulus*sqrt(solid_time);
//}
// Now for the maximum change in temperature
//double current_temp = it->FastGetSolutionStepValue(TEMPERATURE);
//double old_temp = it->FastGetSolutionStepValue(TEMPERATURE,1);
// max_delta_temp=std::max(-current_temp + old_temp,max_delta_temp);
}
//workaround because VS does not support omp 4.0
double max_nodal_volume;
for (int i = 0; i < OpenMPUtils::GetNumThreads(); i++)
{
max_nodal_volume = std::max(max_nodal_volume, mdelta[i]);
}
current_solidified_volume = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(current_solidified_volume);
old_solidified_volume = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(old_solidified_volume);
tot_vol = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(tot_vol);
current_over_mushy_zone = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(current_over_mushy_zone);
old_over_mushy_zone = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(old_over_mushy_zone);
max_nodal_volume = ThisModelPart.GetCommunicator().GetDataCommunicator().MaxAll(max_nodal_volume);
if(tot_vol == 0.0) KRATOS_THROW_ERROR(std::logic_error, "inside ComputeSolidificationCoolingDt: total volume is zero!", "")
if(current_solidified_volume >= tot_vol)
{
ThisModelPart.GetProcessInfo()[IS_SOLIDIFIED] = 1;
return current_dt;
}
else
{
double delta_solid = current_solidified_volume - old_solidified_volume;
double delta_over_mushy_zone=current_over_mushy_zone-old_over_mushy_zone;
if( delta_solid > 0.0 || delta_over_mushy_zone > 0.0 )
{
delta_solid /= tot_vol;
delta_over_mushy_zone /= tot_vol;
if(delta_solid<=0.0){delta_solid=delta_over_mushy_zone*solidification_percent/change_in_shrinkage;} //It sets the value so that in next step we get exactly the same value
double solidification_ratio = solidification_percent / delta_solid;
double limited_change_in_shrinkage_ratio = 0.0;
if (delta_over_mushy_zone <= 0.0)
{
delta_over_mushy_zone = delta_solid*change_in_shrinkage / solidification_percent; // It sets the value so that in next step we get exactly the same value
limited_change_in_shrinkage_ratio = change_in_shrinkage;
}
else
{
if (improve_solidification_tracking == true)
{
double lower_limit = (max_nodal_volume/ tot_vol)*low_boundary_over_mushy_zone;
double liquid_percent = 1.00 - (current_over_mushy_zone / tot_vol);
limited_change_in_shrinkage_ratio = std::min(change_in_shrinkage, liquid_percent*0.5);//delta_over_mushy_zone*0.8);
limited_change_in_shrinkage_ratio = std::max(limited_change_in_shrinkage_ratio, lower_limit);
KRATOS_WATCH(lower_limit)
KRATOS_WATCH(change_in_shrinkage)
KRATOS_WATCH(limited_change_in_shrinkage_ratio)
}
else
{
limited_change_in_shrinkage_ratio = change_in_shrinkage;
}
}
double change_in_shrinkage_ratio= limited_change_in_shrinkage_ratio / delta_over_mushy_zone;
double K = std::min(solidification_ratio, change_in_shrinkage_ratio);
double new_dt = std::min(1.5, K ) * current_dt;
if( new_dt > dt_max) new_dt = dt_max;
else if( new_dt < dt_min) new_dt = dt_min;
return new_dt;
}
else
{
return current_dt;
}
}
}
}
else //coling delta_t
{
//double cooling_dt_max = dt_max;//30.0*dt_max;
int node_size = ThisModelPart.Nodes().size();
double max_delta_temp = 0.0;
for (int ii = 0; ii < node_size; ii++)
{
ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii;
double current_temp = it->FastGetSolutionStepValue(TEMPERATURE);
double old_temp = it->FastGetSolutionStepValue(TEMPERATURE,1);
max_delta_temp=std::max(-current_temp+old_temp,max_delta_temp);
}
max_delta_temp = ThisModelPart.GetCommunicator().GetDataCommunicator().MaxAll(max_delta_temp);
if( max_delta_temp > 0.0 ){
double new_delta_time = max_cooling_delta_temp / max_delta_temp;
new_delta_time *= current_dt;
if( new_delta_time > dt_max)
new_delta_time = dt_max;
else if( new_delta_time < dt_min)
new_delta_time = dt_min;
return new_delta_time;
}
else
{
// is_cold = 1;
return current_dt;
}
}
KRATOS_CATCH("")
}
//************************************************************************************************
//*************** FIND AN ESTIMATION FOR SOLIDIFICATION TIME. NO VIRTUAL MOLD *******************
//************************************************************************************************
double EstimateSolidificationTimeNoVirtualMould(ModelPart& ThisModelPart)
{
    KRATOS_TRY
    // Closed-form solidification-time estimate when no virtual mould is present:
    // (energy to cool through the freezing range + latent heat) divided by the
    // heat extracted through the part surface, scaled by the modulus (V/A)^0.8.
    // Throws if the accumulated part volume or area is zero.
    const double rho      = ThisModelPart.GetProcessInfo()[DENSITY];
    const double c_heat   = ThisModelPart.GetProcessInfo()[SPECIFIC_HEAT];
    const double h_coeff  = ThisModelPart.GetProcessInfo()[HTC];
    const double T_mould  = ThisModelPart.GetProcessInfo()[MOLD_AVERAGE_TEMPERATURE];
    const double T_solid  = ThisModelPart.GetProcessInfo()[SOLID_TEMPERATURE];
    const double T_liquid = ThisModelPart.GetProcessInfo()[FLUID_TEMPERATURE];
    const double latent   = ThisModelPart.GetProcessInfo()[LATENT_HEAT];
    // Accumulate the part volume and its exchange surface from nodal data.
    double part_volume = 0.0;
    double part_area   = 0.0;
    const int n_nodes = ThisModelPart.Nodes().size();
    for (int k = 0; k < n_nodes; k++)
    {
        ModelPart::NodesContainerType::iterator node_it = ThisModelPart.NodesBegin() + k;
        part_volume += node_it->GetValue(NODAL_VOLUME);
        part_area   += node_it->FastGetSolutionStepValue(NODAL_PAUX);
    }
    // Guard against an empty/degenerate model part before dividing.
    if (part_area == 0.0 || part_volume == 0.0)
        KRATOS_THROW_ERROR(std::invalid_argument,"AREA or VOLUME is Zero", "");
    const double energy_term = rho * (c_heat * (T_liquid - T_solid) + latent);
    const double flux_term   = h_coeff * 0.5 * (T_solid - T_mould);
    return (energy_term / flux_term) * pow(part_volume / part_area, 0.8);
    KRATOS_CATCH("")
}
//************************************************************************************************
//**************** FIND AN ESTIMATION FOR SOLIDIFICATION TIME. VIRTUAL MOLD *********************
//************************************************************************************************
/* For solving this we are going to suppose that we dissipate all the energy through the mould outer surface.
We estimate the inner energy of the system as the SUM of 3 contributions: the energy loss needed to cool the
part, the energy loss needed to make the part change its phase, and the energy needed to cool the mould. The
contribution of the mould is only considered if positive.
All terms are linearized with respect to the temperature, so that:
E_1=V_{part}*C_{part}*\rho_{part}*T
E_2=V_{mould}*V_{fact}*\rho_{mould}*C_{mould}*max(0,(T_{mould}-T_{end})/(T_{ini}-T_{end}))*T
E_3=LH*\rho*V_{part}*(T-T_{end})/(T_{ini}-T_{end})
Now we have that q (the energy time derivative) is
dE/dt=HTC_{env}*Sfact*A_{part}*(T-T_{env})
We can set it up as an ODE and solve analytically (recall this is a linearization, but it will be enough for our purpose):
dE_1/dT=V_{part}*C_{part}*\rho_{part}
dE_2/dT=V_{mould}*V_{fact}*\rho_{mould}*C_{mould}*max(0,(T_{mould}-T_{end})/(T_{ini}-T_{end}))
dE_3/dT=LH*\rho*V_{part}/(T_{ini}-T_{end})
Solving the ODE, we have that:
\Delta t = ( (dE_1/dT+dE_2/dT+dE_3/dT) / (HTC_{env}*Sfact*A_{part}) ) * ln( (T_{ini}-T_{env})/(T_{end}-T_{env}) )
As starting temperature we take the average temperature, and as initial mould temperature we take the initial mould temperature.
*/
double EstimateSolidificationTime(ModelPart& ThisModelPart)
{
KRATOS_TRY
// Estimate the time for the part to cool from the average temperature down to
// the solidus using two independent models, returning the more conservative
// (larger) of the two:
//   1) a lumped, linearized energy balance (see the block comment above this
//      function) solved in closed form as an exponential decay law;
//   2) a Chvorinov-type rule built from area-averaged mould properties and
//      the part modulus (V/A).
double solidification_time = 0.0;
// Auxiliaty variables
double dE1=0.0; // dE_1/dT: part heat-capacity term
double dE2=0.0; // dE_2/dT: mould heat-capacity term (clamped at 0 below)
double dE3=0.0; // dE_3/dT: latent-heat term, linearized over (T_ini - T_end)
double DENOM=0.0; // sum of HTC_env * Sfact * A over the mould surface
// Environment and part variables
const double ambient_temperature=ThisModelPart.GetProcessInfo()[AMBIENT_TEMPERATURE];
const double LL = ThisModelPart.GetProcessInfo()[LATENT_HEAT];
const double density = ThisModelPart.GetProcessInfo()[DENSITY];
const double cc= ThisModelPart.GetProcessInfo()[SPECIFIC_HEAT];
const double initial_temperature= ThisModelPart.GetProcessInfo()[AVERAGE_TEMPERATURE];
const double stop_temperature= ThisModelPart.GetProcessInfo()[SOLID_TEMPERATURE];
// Loop Over the nodes - Compute E1 term
double tot_vol = 0.0;
int node_size = ThisModelPart.Nodes().size();
for (int ii = 0; ii < node_size; ii++)
{
ModelPart::NodesContainerType::iterator it_nd = ThisModelPart.NodesBegin() + ii;
double vol = it_nd->GetValue(NODAL_VOLUME);
tot_vol += vol;
vol=pow(vol,1.0); // no-op (exponent 1.0); kept for symmetry with EstimateCoolingTime, which uses 0.8
// dE1 - First Term
//dE_1/dT=V_{part}*C_{part}*\rho_{part}
dE1+= vol*density*cc;
// dE3 - Third Term
//dE_3/dT=LH*\rho*V_{part}/(T_{ini}-T_{end})
// NOTE(review): assumes initial_temperature > stop_temperature -- confirm upstream.
dE3+=LL*density*vol/(initial_temperature-stop_temperature);
}
double tot_area=0.0;
// Area-weighted mould-property accumulators for the Chvorinov estimate below.
double avg_conductivity=0.0;
double avg_density=0.0;
double avg_sheat=0.0;
// Loop over the conditions Compute E2, E3 and Denom term
for (ModelPart::ConditionIterator itCond = ThisModelPart.ConditionsBegin(); itCond != ThisModelPart.ConditionsEnd(); itCond++ )
{
// Generate the Geometry of the condition
Condition::GeometryType& rGeom = itCond->GetGeometry();
const double mould_density= itCond->GetProperties()[MOLD_DENSITY];
const double mould_specific_heat= itCond->GetProperties()[MOLD_SPECIFIC_HEAT];
const double mould_thickness = itCond->GetProperties()[MOLD_THICKNESS];
const double mould_vfact= itCond->GetProperties()[MOLD_VFACT];
const double mould_sfact= itCond->GetProperties()[MOLD_SFACT];
const double mould_htc_env= itCond->GetProperties()[MOLD_HTC_ENVIRONMENT];
const double mould_cond=itCond->GetProperties()[MOLD_CONDUCTIVITY];
//const double mould_conductivity = itCond->GetProperties()[MOLD_CONDUCTIVITY];
const double mould_temperature = itCond->GetProperties()[MOLD_TEMPERATURE];
double tarea=rGeom.DomainSize();
const double condition_area=pow(tarea,1.0); // no-op (exponent 1.0); see note above
tot_area+=tarea;
// dE2 - Second Term
//dE_2/dT=V_{mould}*V_{fact}*\rho_{mould}*C_{mould}*max(0,(T_{mould}-T_{end})/(T_{ini}-T_{end}))
double aux =condition_area*mould_thickness*mould_vfact*mould_density*mould_specific_heat;
double aux2=(mould_temperature- stop_temperature)/(initial_temperature-stop_temperature ) ;
dE2+=std::max(aux*aux2,0.0); // clamp: a mould contribution is only counted if positive
// Denom.
// HTC_{env}*Sfact*A_{part} )
DENOM+= mould_htc_env*condition_area*mould_sfact;
// To be used by Chorinov Formulation
avg_conductivity+=mould_cond*tarea;
avg_density+=mould_density*tarea;
avg_sheat+=mould_specific_heat*tarea;
}
// Closed-form solution of the linearized ODE:
//   dt = (dE1+dE2+dE3)/DENOM * ln((T_ini - T_env)/(T_end - T_env))
// NOTE(review): DENOM==0 (no conditions) or stop_temperature <= ambient_temperature
// would yield inf/NaN; there is no guard here -- confirm callers guarantee both.
solidification_time = ((dE1+dE2+dE3)/DENOM)*log( (initial_temperature-ambient_temperature)/(stop_temperature-ambient_temperature) );
// Now we will compute Chorinov's Rule
// NOTE(review): tot_area==0 divides by zero below (unlike the NoVirtualMould
// variant, which throws) -- confirm at least one condition always exists.
avg_conductivity/=tot_area;
avg_density/=tot_area;
avg_sheat/=tot_area;
double solidification_time_chorinov=0.0;
//const double htc= ThisModelPart.GetProcessInfo()[HTC];
//const double mould_temperature=ThisModelPart.GetProcessInfo()[MOLD_AVERAGE_TEMPERATURE];
// Chvorinov-type rule: t = C * (V/A)^1.5 with C built from averaged mould
// properties; 3.1416 approximates pi.
solidification_time_chorinov=pow(density*LL/fabs(initial_temperature-stop_temperature),2)*(3.1416/(4*avg_conductivity*avg_density*avg_sheat));
solidification_time_chorinov*=1+(cc*pow((initial_temperature-stop_temperature)/LL,2));
solidification_time_chorinov*=pow(tot_vol/tot_area,1.5);
// Return the more conservative (larger) of the two estimates.
return std::max(solidification_time,solidification_time_chorinov);
KRATOS_CATCH("")
}
double CheckStopTemperature(ModelPart& ThisModelPart,const double stop_temperature)
{
    KRATOS_TRY
    // Percentage of cooling achieved, measured from the initial average
    // temperature (stored in the process info) down to stop_temperature.
    // NOTE(review): the mean below is taken over the LOCAL nodes only (no MPI
    // reduction, unlike sibling methods) -- confirm this is intentional.
    const double initial_avg_temp = ThisModelPart.GetProcessInfo()[AVERAGE_TEMPERATURE];
    const double full_range = initial_avg_temp - stop_temperature;
    const int n_nodes = ThisModelPart.Nodes().size();
    double accumulated_temp = 0.0;
    #pragma omp parallel for reduction(+:accumulated_temp)
    for (int k = 0; k < n_nodes; k++)
    {
        ModelPart::NodesContainerType::iterator node_it = ThisModelPart.NodesBegin() + k;
        const double nodal_temp = node_it->FastGetSolutionStepValue(TEMPERATURE);
        // Clip from below so nodes already colder than the target do not
        // drag the average under the stop temperature.
        accumulated_temp += std::max(nodal_temp, 0.99999*stop_temperature);
    }
    const double mean_excess = accumulated_temp/double(n_nodes) - stop_temperature;
    return 100.0*(1.0 - mean_excess/full_range);
    KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
//
double ComputeSurfaceWaveDt(ModelPart& ThisModelPart, const double total_volume, const double edge_size,const double max_Dt)
{
    KRATOS_TRY
    // Estimate a stable time step for gravity surface waves:
    //   dt = edge_size / sqrt(g * L)
    // where L is a characteristic length of the empty region above the fluid.
    // The result is capped at max_Dt. When there is (almost) no fluid, or no
    // free-surface cut, the criterion does not apply and max_Dt is returned.
    const double cutted_area = ThisModelPart.GetProcessInfo()[CUTTED_AREA];
    const double wet_volume = ThisModelPart.GetProcessInfo()[WET_VOLUME];
    const double compare_percente = 0.01; // fluid below 1% of the domain: skip
    if( wet_volume < compare_percente * total_volume || cutted_area <= 0.0)
        return max_Dt;
    // Three candidate characteristic lengths; take the largest.
    const double empty_volume = total_volume - wet_volume;
    const double dist_1 = empty_volume/cutted_area; // mean height of the empty region
    const double dist_2 = cutted_area/dist_1;       // cut area over that height
    const double dist_3 = sqrt(cutted_area);        // length scale of the cut itself
    // (Fix: the previous version initialized max_dist to dist_1 and then
    // immediately overwrote it -- a dead store.)
    const double max_dist = std::max(dist_1, std::max(dist_2, dist_3));
    const double inv_sqrt_gravity = 0.319275428; // 1/sqrt(9.81)
    const double wave_dt = inv_sqrt_gravity * edge_size/sqrt(max_dist);
    return std::min(wave_dt, max_Dt);
    KRATOS_CATCH("")
}
int CheckIsInTransition(ModelPart& ThisModelPart)
{
    // Returns 1 (consistently on every MPI rank) if any node in the whole
    // model sits strictly inside the mushy interval (solidus, liquidus);
    // returns 0 otherwise.
    const double solidus  = ThisModelPart.GetProcessInfo()[SOLID_TEMPERATURE];
    const double liquidus = ThisModelPart.GetProcessInfo()[FLUID_TEMPERATURE];
    int found_transition_node = 0;
    const int n_nodes = ThisModelPart.Nodes().size();
    for (int k = 0; k < n_nodes; k++)
    {
        ModelPart::NodesContainerType::iterator node_it = ThisModelPart.NodesBegin() + k;
        const double nodal_temp = node_it->FastGetSolutionStepValue(TEMPERATURE);
        if (nodal_temp > solidus && nodal_temp < liquidus)
        {
            found_transition_node = 1;
            break; // one such node is enough for this rank
        }
    }
    // Synchronize across ranks: any rank holding such a node forces 1.
    found_transition_node = ThisModelPart.GetCommunicator().GetDataCommunicator().MaxAll(found_transition_node);
    return (found_transition_node == 1) ? 1 : 0;
}
//**********************************************************************************************
//**********************************************************************************************
//
double EstimateCoolingTime(ModelPart& ThisModelPart,const double stop_temperature)
{
/* Estimate the time for the part to cool down to stop_temperature.
We suppose that we dissipate all the energy through the mould outer surface.
The inner energy of the system is the SUM of 3 contributions: the energy loss
needed to cool the part, the energy loss needed to make the part change its
phase, and the energy needed to cool the mould (only counted if positive).
All terms are linearized with respect to the temperature:
E_1=V_{part}*C_{part}*\rho_{part}*T
E_2=V_{mould}*V_{fact}*\rho_{mould}*C_{mould}*max(0,(T_{mould}-T_{end})/(T_{ini}-T_{end}))*T
E_3=LH*\rho*V_{part}*(T-T_{end})/(T_{ini}-T_{end})
and q (the energy time derivative) is
dE/dt=HTC_{env}*Sfact*A_{part}*(T-T_{env})
Setting it up as an ODE and solving analytically (a linearization, but enough
for our purpose):
dE_1/dT=V_{part}*C_{part}*\rho_{part}
dE_2/dT=V_{mould}*V_{fact}*\rho_{mould}*C_{mould}*max(0,(T_{mould}-T_{end})/(T_{ini}-T_{end}))
dE_3/dT=LH*\rho*V_{part}/(T_{ini}-T_{end})
\Delta t = ( (dE_1/dT+dE_2/dT+dE_3/dT) / (HTC_{env}*Sfact*A_{part}) ) * ln( (T_{ini}-T_{env})/(T_{end}-T_{env}) )
As starting temperature we take the average temperature, and as initial mould
temperature we take the initial mould temperature.
The final result is the maximum of this estimate and a Chvorinov-type rule
(plus a post-solidification cooling term). */
// Auxiliaty variables
double CoolingTime; // assigned below, once dE1/dE2/dE3/DENOM are accumulated
double dE1=0.0; // dE_1/dT: part heat-capacity term
double dE2=0.0; // dE_2/dT: mould heat-capacity term (clamped at 0 below)
double dE3=0.0; // dE_3/dT: latent-heat term
double DENOM=0.0; // sum of HTC_env * Sfact * A over the mould surface
// Environment and part variables
const double ambient_temperature=ThisModelPart.GetProcessInfo()[AMBIENT_TEMPERATURE];
const double LL = ThisModelPart.GetProcessInfo()[LATENT_HEAT];
const double density = ThisModelPart.GetProcessInfo()[DENSITY];
const double cc= ThisModelPart.GetProcessInfo()[SPECIFIC_HEAT];
const double initial_temperature= ThisModelPart.GetProcessInfo()[AVERAGE_TEMPERATURE];
// Loop Over the nodes - Compute E1 term
double tot_vol = 0.0;
int node_size = ThisModelPart.Nodes().size();
for (int ii = 0; ii < node_size; ii++)
{
ModelPart::NodesContainerType::iterator it_nd = ThisModelPart.NodesBegin() + ii;
double vol = it_nd->GetValue(NODAL_VOLUME);
tot_vol += vol;
vol=pow(vol,0.8); // empirical damping of large nodal volumes -- TODO confirm rationale
// dE1 - First Term
//dE_1/dT=V_{part}*C_{part}*\rho_{part}
dE1+= vol*density*cc;
// dE3 - Third Term
//dE_3/dT=LH*\rho*V_{part}/(T_{ini}-T_{end})
// NOTE(review): assumes initial_temperature > stop_temperature -- confirm upstream.
dE3+=LL*density*vol/(initial_temperature-stop_temperature);
}
double tot_area=0.0;
// Area-weighted mould-property accumulators for the Chvorinov estimate below.
double avg_conductivity=0.0;
double avg_density=0.0;
double avg_sheat=0.0;
double avg_env_htc=0.0;
// Loop over the conditions Compute E2, E3 and Denom term
for (ModelPart::ConditionIterator itCond = ThisModelPart.ConditionsBegin(); itCond != ThisModelPart.ConditionsEnd(); itCond++ )
{
// Generate the Geometry of the condition
Condition::GeometryType& rGeom = itCond->GetGeometry();
const double mould_density= itCond->GetProperties()[MOLD_DENSITY];
const double mould_specific_heat= itCond->GetProperties()[MOLD_SPECIFIC_HEAT];
const double mould_thickness = itCond->GetProperties()[MOLD_THICKNESS];
const double mould_vfact= itCond->GetProperties()[MOLD_VFACT];
const double mould_sfact= itCond->GetProperties()[MOLD_SFACT];
const double mould_htc_env= itCond->GetProperties()[MOLD_HTC_ENVIRONMENT];
const double mould_conductivity = itCond->GetProperties()[MOLD_CONDUCTIVITY];
const double mould_temperature = itCond->GetProperties()[MOLD_TEMPERATURE];
double tarea=rGeom.DomainSize();
tot_area+=tarea;
const double condition_area=pow(fabs(rGeom.DomainSize()),1.0); // no-op (exponent 1.0); |.| guards inverted geometry
// dE2 - Second Term
//dE_2/dT=V_{mould}*V_{fact}*\rho_{mould}*C_{mould}*max(0,(T_{mould}-T_{end})/(T_{ini}-T_{end}))
double aux =condition_area*mould_thickness*mould_vfact*mould_density*mould_specific_heat;
double aux2=(mould_temperature- stop_temperature)/(initial_temperature-stop_temperature ) ;
dE2+=std::max(aux*aux2,0.0); // clamp: mould contribution only counted if positive
// Denom.
// HTC_{env}*Sfact*A_{part} )
DENOM+= mould_htc_env*condition_area*mould_sfact;
// To be used by Chorinov Formulation
avg_conductivity+=mould_conductivity*tarea;
avg_density+=mould_density*tarea;
avg_sheat+=mould_specific_heat*tarea;
avg_env_htc+=tarea*mould_htc_env*mould_sfact;
}
// Closed-form solution of the linearized ODE (see header comment).
// NOTE(review): DENOM==0 or stop_temperature <= ambient_temperature yields
// inf/NaN; no guard exists here -- confirm callers guarantee both.
CoolingTime = ((dE1+dE2+dE3)/DENOM)*log( (initial_temperature-ambient_temperature)/(stop_temperature-ambient_temperature) );
// Now we will compute Chorinov's Rule
// NOTE(review): tot_area==0 divides by zero below -- confirm at least one
// condition always exists.
avg_conductivity/=tot_area;
avg_density/=tot_area;
avg_sheat/=tot_area;
avg_env_htc/=tot_area;
double cooling_time_chorinov=0.0;
//const double htc= ThisModelPart.GetProcessInfo()[HTC];
const double solid_temp= ThisModelPart.GetProcessInfo()[SOLID_TEMPERATURE];
//const double mould_temperature=ThisModelPart.GetProcessInfo()[MOLD_AVERAGE_TEMPERATURE];
// Chvorinov-type rule: t = C * (V/A)^2 with C from averaged mould properties;
// 3.1416 approximates pi.
cooling_time_chorinov=pow(density*LL/fabs(initial_temperature-solid_temp),2)*(3.1416/(4*avg_conductivity*avg_density*avg_sheat));
cooling_time_chorinov*=1+(cc*pow((initial_temperature-solid_temp)/LL,2));
cooling_time_chorinov*=pow(tot_vol/tot_area,2);
// Now we compute the time from solidification to cooling
// (lumped balance from solidus down to stop_temperature, driven by the
// averaged environment HTC at the mean-to-ambient temperature difference).
double time_to_cool=(tot_vol*cc*density*fabs(solid_temp-stop_temperature))/(avg_env_htc*tot_area*(0.5*initial_temperature+0.5*stop_temperature-ambient_temperature));
cooling_time_chorinov+=time_to_cool;
// Return the more conservative (larger) of the two estimates.
return std::max(CoolingTime,cooling_time_chorinov);
}
/////////////////////////////////////////////////////////////////////////
int CheckMinTemperature(ModelPart& ThisModelPart)
{
    // Global minimum nodal temperature across all MPI ranks.
    // NOTE(review): the return type is int, so the double minimum is
    // truncated on return -- this mirrors the original behaviour; confirm
    // callers expect an integer temperature.
    double coldest = 1e9; // sentinel larger than any physical temperature here
    const int n_nodes = ThisModelPart.Nodes().size();
    for (int k = 0; k < n_nodes; k++)
    {
        ModelPart::NodesContainerType::iterator node_it = ThisModelPart.NodesBegin() + k;
        coldest = std::min(coldest, (double)node_it->FastGetSolutionStepValue(TEMPERATURE));
    }
    return ThisModelPart.GetCommunicator().GetDataCommunicator().MinAll(coldest);
}
//private:
// /////////////////////////////////////////////////////////////////////////////
int CheckMaxTemperature(ModelPart& ThisModelPart)
{
    // Returns 1 (consistently on every MPI rank) when every node in the model
    // is at or above the liquidus temperature (FLUID_TEMPERATURE), i.e. the
    // whole domain is still fully liquid; returns 0 otherwise.
    //
    // NOTE: the previous implementation kept a per-thread flag vector behind a
    // commented-out "#pragma omp parallel for" around a loop containing
    // "break". That pragma could never legally be enabled (branching out of an
    // OpenMP worksharing loop is not allowed), so the scaffolding was dead and
    // misleading; the sequential early-exit scan below is exactly what
    // executed before.
    const double liquidus_temp = ThisModelPart.GetProcessInfo()[FLUID_TEMPERATURE];
    double is_hot_point = 1.0;
    const int node_size = ThisModelPart.Nodes().size();
    for (int ii = 0; ii < node_size; ii++)
    {
        ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii;
        if (it->FastGetSolutionStepValue(TEMPERATURE) < liquidus_temp)
        {
            is_hot_point = 0.0; // found a node below liquidus: not fully liquid
            break;
        }
    }
    // Synchronize across ranks: any rank with a cold node forces the result to 0.
    is_hot_point = ThisModelPart.GetCommunicator().GetDataCommunicator().MinAll(is_hot_point);
    return (is_hot_point == 1.0) ? 1 : 0;
}
};
} // namespace Kratos.
#endif // ASSIGN_NO_SLIP_CONDITION defined
|
par_relax_more.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* a few more relaxation schemes: Chebychev, FCF-Jacobi, CG -
* these do not go through the CF interface (hypre_BoomerAMGRelaxIF)
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int*,HYPRE_Real *,HYPRE_Real *,HYPRE_Int *);
/******************************************************************************
*
*use max norm to estimate largest eigenvalue
*
*****************************************************************************/
HYPRE_Int hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Real *max_eig)
{
/* Cheap estimate of the largest eigenvalue of A (or of D^{-1}A when
 * scale != 0) via the infinity norm: for each row, |diag| + sum of
 * |off-diagonals|, optionally divided by |diag|; the global maximum over all
 * rows/processes is the bound. Adequate for SPD matrices. The result is
 * written to *max_eig; the sign is flipped when every diagonal is negative. */
HYPRE_Real e_max;
HYPRE_Real row_sum, max_norm;
HYPRE_Real *A_diag_data;
HYPRE_Real *A_offd_data;
HYPRE_Real temp;
HYPRE_Real diag_value;
HYPRE_Int pos_diag, neg_diag; /* counts of positive/negative diagonal entries */
HYPRE_Int A_num_rows;
HYPRE_Int *A_diag_i;
HYPRE_Int *A_offd_i;
HYPRE_Int j;
HYPRE_Int i, start;
/* estimate with the inf-norm of A - should be ok for SPD matrices */
A_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
A_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
A_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
A_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(A));
A_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(A));
max_norm = 0.0;
pos_diag = neg_diag = 0;
for ( i = 0; i < A_num_rows; i++ )
{
start = A_diag_i[i];
/* assumes hypre's convention that the diagonal entry is stored first in
 * each row of the diag part -- TODO confirm rows are never empty */
diag_value = A_diag_data[start];
if (diag_value > 0)
{
pos_diag++;
}
if (diag_value < 0)
{
neg_diag++;
diag_value = -diag_value; /* work with |diag| from here on */
}
row_sum = diag_value;
/*for (j = 0; j < row_length; j++)*/
/* remaining local (diag-part) entries of the row */
for (j = start+1; j < A_diag_i[i+1]; j++)
{
row_sum += fabs(A_diag_data[j]);
}
/* off-processor (offd-part) entries of the row */
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
row_sum += fabs(A_offd_data[j]);
}
if (scale)
{
/* scaled estimate: divide the row sum by |diag| (skip zero diagonals) */
if (diag_value != 0.0)
row_sum = row_sum/diag_value;
}
if ( row_sum > max_norm ) max_norm = row_sum;
}
/* get max across procs */
hypre_MPI_Allreduce(&max_norm, &temp, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A));
max_norm = temp;
/* from Charles: if every diagonal is negative, the spectrum is negated */
if ( pos_diag == 0 && neg_diag > 0 ) max_norm = - max_norm;
/* eig estimates */
e_max = max_norm;
/* return */
*max_eig = e_max;
return hypre_error_flag;
}
/******************************************************************************
use CG to get the eigenvalue estimate
scale means get eig est of (D^{-1/2} A D^{-1/2}
******************************************************************************/
HYPRE_Int hypre_ParCSRMaxEigEstimateCG(hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int max_iter,
HYPRE_Real *max_eig,
HYPRE_Real *min_eig)
{
/* Estimate the extreme eigenvalues of A (or D^{-1/2} A D^{-1/2} when
 * scale != 0) by running up to max_iter CG iterations on a random
 * right-hand side, assembling the Lanczos tridiagonal matrix from the CG
 * coefficients (alpha, beta), and solving its eigenproblem with the
 * EISPACK-derived routine hypre_LINPACKcgtql1. Results go to *max_eig and
 * *min_eig. The solution vector itself is never formed -- only the
 * recurrence coefficients matter. */
HYPRE_Int i, j, err;
hypre_ParVector *p;
hypre_ParVector *s;
hypre_ParVector *r;
hypre_ParVector *ds; /* D^{-1/2} (or all ones when scale == 0) */
hypre_ParVector *u;  /* scratch for the scaled matvec */
HYPRE_Real *tridiag = NULL; /* diagonal of the Lanczos matrix */
HYPRE_Real *trioffd = NULL; /* off-diagonal of the Lanczos matrix */
HYPRE_Real lambda_max ;
HYPRE_Real beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv;
HYPRE_Real diag;
HYPRE_Real lambda_min;
HYPRE_Real *s_data, *p_data, *ds_data, *u_data;
HYPRE_Int local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
/* check the size of A - don't iterate more than the size */
HYPRE_BigInt size = hypre_ParCSRMatrixGlobalNumRows(A);
if (size < (HYPRE_BigInt) max_iter)
max_iter = (HYPRE_Int) size;
/* create some temp vectors: p, s, r , ds, u*/
r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(r);
hypre_ParVectorSetPartitioningOwner(r,0);
p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(p);
hypre_ParVectorSetPartitioningOwner(p,0);
s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(s);
hypre_ParVectorSetPartitioningOwner(s,0);
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
hypre_ParVectorSetPartitioningOwner(ds,0);
u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(u);
hypre_ParVectorSetPartitioningOwner(u,0);
/* point to local data */
s_data = hypre_VectorData(hypre_ParVectorLocalVector(s));
p_data = hypre_VectorData(hypre_ParVectorLocalVector(p));
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
/* make room for tri-diag matrix */
tridiag = hypre_CTAlloc(HYPRE_Real, max_iter+1, HYPRE_MEMORY_HOST);
trioffd = hypre_CTAlloc(HYPRE_Real, max_iter+1, HYPRE_MEMORY_HOST);
for (i=0; i < max_iter + 1; i++)
{
tridiag[i] = 0;
trioffd[i] = 0;
}
/* set residual to random */
hypre_ParVectorSetRandomValues(r,1);
if (scale)
{
/* build D^{-1/2}; assumes the diagonal entry is stored first in each row
 * (hypre convention) and is positive -- TODO confirm for non-SPD input */
for (i = 0; i < local_size; i++)
{
diag = A_diag_data[A_diag_i[i]];
ds_data[i] = 1/sqrt(diag);
}
}
else
{
/* set ds to 1 */
hypre_ParVectorSetConstantValues(ds,1.0);
}
/* gamma = <r,Cr> */
/* NOTE(review): p has not been filled yet, so this value is meaningless --
 * but it is harmless: gamma is recomputed inside the loop before gamma_old
 * is ever read (the i>0 branch). */
gamma = hypre_ParVectorInnerProd(r,p);
/* for the initial filling of the tridiag matrix */
beta = 1.0;
i = 0;
while (i < max_iter)
{
/* s = C*r */
/* TO DO: C = diag scale */
/* the preconditioner C is currently the identity: s is just a copy of r */
hypre_ParVectorCopy(r, s);
/*gamma = <r,Cr> */
gamma_old = gamma;
gamma = hypre_ParVectorInnerProd(r,s);
if (i==0)
{
beta = 1.0;
/* p_0 = C*r */
hypre_ParVectorCopy(s, p);
}
else
{
/* beta = gamma / gamma_old */
beta = gamma / gamma_old;
/* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j=0; j < local_size; j++)
{
p_data[j] = s_data[j] + beta*p_data[j];
}
}
if (scale)
{
/* s = D^{-1/2}A*D^{-1/2}*p */
for (j = 0; j < local_size; j++)
{
u_data[j] = ds_data[j] * p_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s);
for (j = 0; j < local_size; j++)
{
s_data[j] = ds_data[j] * s_data[j];
}
}
else
{
/* s = A*p */
hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s);
}
/* <s,p> */
sdotp = hypre_ParVectorInnerProd(s,p);
/* alpha = gamma / <s,p> */
alpha = gamma/sdotp;
/* get tridiagonal matrix: the Lanczos coefficients are recovered from the
 * CG alpha/beta recurrence (diag: 1/alpha_k + beta_k/alpha_{k-1},
 * off-diag: sqrt(beta_k)/alpha_{k-1}) */
alphainv = 1.0/alpha;
tridiag[i+1] = alphainv;
tridiag[i] *= beta;
tridiag[i] += alphainv;
trioffd[i+1] = alphainv;
trioffd[i] *= sqrt(beta);
/* x = x + alpha*p */
/* don't need */
/* r = r - alpha*s */
hypre_ParVectorAxpy( -alpha, s, r);
i++;
}
/* eispack routine - eigenvalues return in tridiag and ordered*/
/* i now holds the number of iterations actually performed */
hypre_LINPACKcgtql1(&i,tridiag,trioffd,&err);
lambda_max = tridiag[i-1];
lambda_min = tridiag[0];
/* hypre_printf("linpack max eig est = %g\n", lambda_max);*/
/* hypre_printf("linpack min eig est = %g\n", lambda_min);*/
hypre_TFree(tridiag, HYPRE_MEMORY_HOST);
hypre_TFree(trioffd, HYPRE_MEMORY_HOST);
hypre_ParVectorDestroy(r);
hypre_ParVectorDestroy(s);
hypre_ParVectorDestroy(p);
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(u);
/* return */
*max_eig = lambda_max;
*min_eig = lambda_min;
return hypre_error_flag;
}
/******************************************************************************
Chebyshev relaxation
Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of
iteratively determining)
variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1
variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1
ratio indicates the percentage of the whole spectrum to use (so .5
means half, and .1 means 10percent)
*******************************************************************************/
HYPRE_Int hypre_ParCSRRelax_Cheby(hypre_ParCSRMatrix *A, /* matrix to relax with */
hypre_ParVector *f, /* right-hand side */
HYPRE_Real max_eig,
HYPRE_Real min_eig,
HYPRE_Real fraction,
HYPRE_Int order, /* polynomial order */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int variant,
hypre_ParVector *u, /* initial/updated approximation */
hypre_ParVector *v /* temporary vector */,
hypre_ParVector *r /*another temp vector */ )
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
HYPRE_Real *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
HYPRE_Real theta, delta;
HYPRE_Real den;
HYPRE_Real upper_bound, lower_bound;
HYPRE_Int i, j;
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Real coefs[5];
HYPRE_Real mult;
HYPRE_Real *orig_u;
HYPRE_Real tmp_d;
HYPRE_Int cheby_order;
HYPRE_Real *ds_data, *tmp_data;
HYPRE_Real diag;
hypre_ParVector *ds;
hypre_ParVector *tmp_vec;
/* u = u + p(A)r */
if (order > 4)
order = 4;
if (order < 1)
order = 1;
/* we are using the order of p(A) */
cheby_order = order -1;
/* make sure we are large enough - Adams et al. 2003 */
upper_bound = max_eig * 1.1;
/* lower_bound = max_eig/fraction; */
lower_bound = (upper_bound - min_eig)* fraction + min_eig;
/* theta and delta */
theta = (upper_bound + lower_bound)/2;
delta = (upper_bound - lower_bound)/2;
if (variant == 1 )
{
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less that resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* (del - t + 2*th)/(th^2 + del*th) */
den = (theta*theta + delta*theta);
coefs[0] = (delta + 2*theta)/den;
coefs[1] = -1.0/den;
break;
case 2: /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
den = 2*delta*theta*theta - delta*delta*theta - pow(delta,3) + 2*pow(theta,3);
coefs[0] = (4*delta*theta - pow(delta,2) + 6*pow(theta,2))/den;
coefs[1] = -(2*delta + 6*theta)/den;
coefs[2] = 2/den;
break;
case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
den = - (4*delta*pow(theta,3) - 3*pow(delta,2)*pow(theta,2) - 3*pow(delta,3)*theta + 4*pow(theta,4) );
coefs[0] = (6*pow(delta,2)*theta - 12*delta*pow(theta,2) + 3*pow(delta,3) - 16*pow(theta,3) )/den;
coefs[1] = (12*delta*theta - 3*pow(delta,2) + 24*pow(theta,2))/den;
coefs[2] = -( 4*delta + 16*theta)/den;
coefs[3] = 4/den;
break;
}
}
else /* standard chebyshev */
{
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less thatn resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
den = delta*delta - 2*theta*theta;
coefs[0] = -4*theta/den;
coefs[1] = 2/den;
break;
case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
den = 3*(delta*delta)*theta - 4*(theta*theta*theta);
coefs[0] = (3*delta*delta - 12 *theta*theta)/den;
coefs[1] = 12*theta/den;
coefs[2] = -4/den;
break;
case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
den = pow(delta,4) - 8*delta*delta*theta*theta + 8*pow(theta,4);
coefs[0] = (32*pow(theta,3)- 16*delta*delta*theta)/den;
coefs[1] = (8*delta*delta - 48*theta*theta)/den;
coefs[2] = 32*theta/den;
coefs[3] = -8/den;
break;
}
}
orig_u = hypre_CTAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_HOST);
if (!scale)
{
/* get residual: r = f - A*u */
hypre_ParVectorCopy(f, r);
hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
for ( i = 0; i < num_rows; i++ )
{
orig_u[i] = u_data[i];
u_data[i] = r_data[i] * coefs[cheby_order];
}
for (i = cheby_order - 1; i >= 0; i-- )
{
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
u_data[j] = mult * r_data[j] + v_data[j];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for ( i = 0; i < num_rows; i++ )
{
u_data[i] = orig_u[i] + u_data[i];
}
}
else /* scaling! */
{
/*grab 1/sqrt(diagonal) */
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
hypre_ParVectorSetPartitioningOwner(ds,0);
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(tmp_vec);
hypre_ParVectorSetPartitioningOwner(tmp_vec,0);
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));
/* get ds_data and get scaled residual: r = D^(-1/2)f -
* D^(-1/2)A*u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_rows; j++)
{
diag = A_diag_data[A_diag_i[j]];
ds_data[j] = 1/sqrt(diag);
r_data[j] = ds_data[j] * f_data[j];
}
hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
r_data[j] += ds_data[j] * tmp_data[j];
}
/* save original u, then start
the iteration by multiplying r by the cheby coef.*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
orig_u[j] = u_data[j]; /* orig, unscaled u */
u_data[j] = r_data[j] * coefs[cheby_order];
}
/* now do the other coefficients */
for (i = cheby_order - 1; i >= 0; i-- )
{
/* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
tmp_data[j] = ds_data[j] * u_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);
/* u_new = coef*r + v*/
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,tmp_d) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
tmp_d = ds_data[j]* v_data[j];
u_data[j] = mult * r_data[j] + tmp_d;
}
} /* end of cheby_order loop */
/* now we have to scale u_data before adding it to u_orig*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
u_data[j] = orig_u[j] + ds_data[j]*u_data[j];
}
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(tmp_vec);
}/* end of scaling code */
hypre_TFree(orig_u, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax_FCFJacobi
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGRelax_FCFJacobi( hypre_ParCSRMatrix *A,
                                          hypre_ParVector    *f,
                                          HYPRE_Int          *cf_marker,
                                          HYPRE_Real          relax_weight,
                                          hypre_ParVector    *u,
                                          hypre_ParVector    *Vtemp)
{
   /* FCF-Jacobi smoothing: three weighted-Jacobi sweeps restricted to
      F-points, then C-points, then F-points again. */
   const HYPRE_Int jacobi_relax_type = 0;       /* relax_type 0 = Jacobi */
   const HYPRE_Int sweep_points[3] = { -1, 1, -1 }; /* F, C, F */
   HYPRE_Int sweep;

   /* A NULL CF marker is only legal when this rank owns no rows. */
   if (cf_marker == NULL)
   {
      hypre_assert(hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)) == 0);
   }

   for (sweep = 0; sweep < 3; sweep++)
   {
      hypre_BoomerAMGRelax(A,
                           f,
                           cf_marker,
                           jacobi_relax_type,
                           sweep_points[sweep],
                           relax_weight,
                           0.0,   /* omega unused by Jacobi */
                           NULL,  /* no l1 norms */
                           u,
                           Vtemp, NULL);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* CG Smoother -
*
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax_CG( HYPRE_Solver        solver,
                                hypre_ParCSRMatrix *A,
                                hypre_ParVector    *f,
                                hypre_ParVector    *u,
                                HYPRE_Int           num_its)
{
   /* PCG used as a smoother: run exactly num_its iterations by
      disabling the residual-tolerance stopping test. */
   HYPRE_PCGSetMaxIter(solver, num_its);
   HYPRE_PCGSetTol(solver, 0.0); /* tol 0 => iteration count alone stops the solve */
   HYPRE_ParCSRPCGSolve(solver, (HYPRE_ParCSRMatrix)A, (HYPRE_ParVector)f, (HYPRE_ParVector)u);
#if 0
   /* Optional convergence report (compiled out). */
   {
      HYPRE_Int myid;
      HYPRE_Int num_iterations;
      HYPRE_Real final_res_norm;
      hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid);
      HYPRE_PCGGetNumIterations(solver, &num_iterations);
      HYPRE_PCGGetFinalRelativeResidualNorm(solver, &final_res_norm);
      if (myid == 0)
      {
         hypre_printf(" -----CG PCG Iterations = %d\n", num_iterations);
         hypre_printf(" -----CG PCG Final Relative Residual Norm = %e\n", final_res_norm);
      }
   }
#endif
   return hypre_error_flag;
}
/* tql1.f --
this is the eispack translation - from Barry Smith in Petsc
Note that this routine always uses real numbers (not complex) even
if the underlying matrix is Hermitian. This is because the Lanczos
process applied to Hermitian matrices always produces a real,
symmetric tridiagonal matrix.
*/
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real*,HYPRE_Real*);
HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int *n,HYPRE_Real *d,HYPRE_Real *e,HYPRE_Int *ierr)
{
/* Eigenvalues of an n x n symmetric tridiagonal matrix by the QL
   method (EISPACK TQL1, f2c-style translation; see the original
   documentation block below). The goto/label structure mirrors the
   Fortran control flow and must be preserved exactly. */
/* System generated locals */
HYPRE_Int i__1,i__2;
HYPRE_Real d__1,d__2,c_b10 = 1.0;
/* Local variables */
HYPRE_Real c,f,g,h;
HYPRE_Int i,j,l,m;
HYPRE_Real p,r,s,c2,c3 = 0.0;
HYPRE_Int l1,l2;
HYPRE_Real s2 = 0.0;
HYPRE_Int ii;
HYPRE_Real dl1,el1;
HYPRE_Int mml;
HYPRE_Real tst1,tst2;
/* THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */
/* NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */
/* WILKINSON. */
/* HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */
/* THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */
/* TRIDIAGONAL MATRIX BY THE QL METHOD. */
/* ON INPUT */
/* N IS THE ORDER OF THE MATRIX. */
/* D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */
/* E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */
/* IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY. */
/* ON OUTPUT */
/* D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN */
/* ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */
/* ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */
/* THE SMALLEST EIGENVALUES. */
/* E HAS BEEN DESTROYED. */
/* IERR IS SET TO */
/* ZERO FOR NORMAL RETURN, */
/* J IF THE J-TH EIGENVALUE HAS NOT BEEN */
/* DETERMINED AFTER 30 ITERATIONS. */
/* CALLS CGPTHY FOR DSQRT(A*A + B*B) . */
/* QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */
/* MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
*/
/* THIS VERSION DATED AUGUST 1983. */
/* ------------------------------------------------------------------
*/
HYPRE_Real ds;
/* Shift the pointers so the 1-based Fortran-style indices below work. */
--e;
--d;
*ierr = 0;
if (*n == 1) {
goto L1001;
}
i__1 = *n;
for (i = 2; i <= i__1; ++i) {
e[i - 1] = e[i];
}
f = 0.;
tst1 = 0.;
e[*n] = 0.;
i__1 = *n;
for (l = 1; l <= i__1; ++l) {
j = 0;
h = (d__1 = d[l],fabs(d__1)) + (d__2 = e[l],fabs(d__2));
if (tst1 < h) {
tst1 = h;
}
/* .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */
i__2 = *n;
for (m = l; m <= i__2; ++m) {
tst2 = tst1 + (d__1 = e[m],fabs(d__1));
if (tst2 == tst1) {
goto L120;
}
/* .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */
/* THROUGH THE BOTTOM OF THE LOOP .......... */
}
L120:
if (m == l) {
goto L210;
}
L130:
if (j == 30) {
goto L1000;
}
++j;
/* .......... FORM SHIFT .......... */
l1 = l + 1;
l2 = l1 + 1;
g = d[l];
p = (d[l1] - g) / (e[l] * 2.);
r = hypre_LINPACKcgpthy(&p,&c_b10);
/* ds = sign(p): pick the root of the shift quadratic that avoids
   catastrophic cancellation in p + ds*r. */
ds = 1.0; if (p < 0.0) ds = -1.0;
d[l] = e[l] / (p + ds*r);
d[l1] = e[l] * (p + ds*r);
dl1 = d[l1];
h = g - d[l];
if (l2 > *n) {
goto L145;
}
i__2 = *n;
for (i = l2; i <= i__2; ++i) {
d[i] -= h;
}
L145:
f += h;
/* .......... QL TRANSFORMATION .......... */
p = d[m];
c = 1.;
c2 = c;
el1 = e[l1];
s = 0.;
mml = m - l;
/* .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */
i__2 = mml;
for (ii = 1; ii <= i__2; ++ii) {
c3 = c2;
c2 = c;
s2 = s;
i = m - ii;
g = c * e[i];
h = c * p;
r = hypre_LINPACKcgpthy(&p,&e[i]);
e[i + 1] = s * r;
s = e[i] / r;
c = p / r;
p = c * d[i] - s * g;
d[i + 1] = h + s * (c * g + s * d[i]);
}
p = -s * s2 * c3 * el1 * e[l] / dl1;
e[l] = s * p;
d[l] = c * p;
tst2 = tst1 + (d__1 = e[l],fabs(d__1));
if (tst2 > tst1) {
goto L130;
}
L210:
p = d[l] + f;
/* .......... ORDER EIGENVALUES .......... */
if (l == 1) {
goto L250;
}
/* .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */
i__2 = l;
for (ii = 2; ii <= i__2; ++ii) {
i = l + 2 - ii;
if (p >= d[i - 1]) {
goto L270;
}
d[i] = d[i - 1];
}
L250:
i = 1;
L270:
d[i] = p;
}
goto L1001;
/* .......... SET ERROR -- NO CONVERGENCE TO AN */
/* EIGENVALUE AFTER 30 ITERATIONS .......... */
L1000:
*ierr = l;
L1001:
return 0;
} /* cgtql1_ */
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real *a,HYPRE_Real *b)
{
   /* Compute sqrt(a*a + b*b) without overflow or destructive underflow
      (LINPACK "pythag": iteratively fold the small/large ratio into p
      until the correction converges). */
   HYPRE_Real p, ratio, r, s, t, u;
   HYPRE_Real abs_a = fabs(*a);
   HYPRE_Real abs_b = fabs(*b);

   p = hypre_max(abs_a, abs_b);
   if (p)
   {
      /* r starts as (min/max)^2 and shrinks every pass. */
      ratio = hypre_min(abs_a, abs_b) / p;
      r = ratio * ratio;
      for (;;)
      {
         t = r + 4.;
         if (t == 4.)   /* correction vanished in double precision */
            break;
         s = r / t;
         u = s * 2. + 1.;
         p = u * p;
         ratio = s / u;
         r = ratio * ratio * r;
      }
   }
   return p;
} /* cgpthy_ */
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax_L1_Jacobi (same as the one in AMS, but this allows CF)
u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax_L1_Jacobi( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
/* One sweep of weighted l1-Jacobi: u_i += w * (f - A u)_i / l1_norms[i].
   relax_points selects the rows to update: 0 = all rows, otherwise only
   rows i with cf_marker[i] == relax_points (C- or F-points). */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data;
HYPRE_Int i, j;
HYPRE_Int ii, jj;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, my_id ;
HYPRE_Real zero = 0.0;
HYPRE_Real res;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
if (num_procs > 1)
{
/* Pack the boundary entries of u and start a non-blocking exchange so
   off-processor values (Vext_data) arrive while we copy u locally. */
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
if (num_procs > 1)
{
/* Wait for the boundary exchange started above to complete. */
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_points == 0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
/* res = (f - A u)_i, using the old iterate (Vtemp/Vext). */
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (relax_weight*res)/l1_norms[i];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (relax_weight * res)/l1_norms[i];
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
return 0;
}
|
vednnConvolutionBackwardFilter.c |
#include "vednnConvolutionBackwardFilter.h"
#include <stdint.h>
#ifdef VEDNN_USE_OPENMP
#include <omp.h>
extern int __vednn_omp_num_threads ;
#endif
static inline vednnError_t
vednnConvolutionBackwardFilter_wrapper(
    vednnConvBackwardFilter_t pFunc,
    const vednnTensorParam_t * restrict pParamIn,
    const void * restrict pDataIn,
    const vednnTensorParam_t * restrict pParamGradOut,
    const void * restrict pDataGradOut,
    const vednnConvolutionParam_t * restrict pParamConv,
    const vednnFilterParam_t * restrict pParamGradKernel,
    void * restrict pDataGradKernel
)
{
  /* Split the per-group output channels [0, gOutChannelGroup) across the
   * OpenMP threads of the caller's team.  When there is no enclosing
   * parallel region (or OpenMP is disabled) nthreads == 1 and the single
   * call covers every channel.  (An explicit "#pragma omp parallel"
   * version existed here but is intentionally disabled.)
   *
   * Fix: omp_get_num_threads()/omp_get_thread_num() were called
   * unconditionally although <omp.h> is only included under
   * VEDNN_USE_OPENMP, causing implicit declarations in non-OpenMP
   * builds; guard the calls instead. */
  vednnError_t rc = VEDNN_SUCCESS ;

#ifdef VEDNN_USE_OPENMP
  int64_t nthreads = omp_get_num_threads() ;
  int64_t threadid = omp_get_thread_num() ;
#else
  int64_t nthreads = 1 ;
  int64_t threadid = 0 ;
#endif

  int64_t gOutChannel = pParamGradOut->channel;
  int64_t group = pParamConv->group;
  int64_t gOutChannelGroup = gOutChannel / group;

  /* Even split; the first `remain` threads take one extra channel. */
  int64_t nOChannel = gOutChannelGroup / nthreads ;
  int64_t remain = gOutChannelGroup % nthreads ;
  int64_t beginOChannel = nOChannel * threadid + ( threadid < remain ? threadid : remain ) ;
  int64_t myOChannel = nOChannel + ( threadid < remain ? 1 : 0 ) ;

  if( myOChannel > 0 ) {
    rc |= pFunc(pParamIn, pDataIn, pParamGradOut, pDataGradOut,
                pParamConv, pParamGradKernel, pDataGradKernel,
                beginOChannel, myOChannel ) ;
  }
  return rc ;
}
/* ----------------------------------------------------------------------- */
vednnError_t vednnConvolutionBackwardFilter(
const vednnTensorParam_t *pParamIn,
const void *pDataIn,
const vednnTensorParam_t *pParamGradOut,
const void *pDataGradOut,
const vednnFilterParam_t *pParamGradKernel,
void *pDataGradKernel,
const vednnConvolutionParam_t *pParamConv,
vednnConvolutionAlgorithm_t algo
)
{
/* Dispatch to a specialized direct-convolution backward-filter kernel
   based on stride/dilation/padding, kernel size, and output geometry.
   Suffix conventions in the kernel names: dil1 = dilation 1, str1 =
   stride 1, padsame = output size equals input size, pad0 = no padding,
   kerN = N x N kernel, owU<k>/ohwU<k> = output width (or height*width)
   at most k. Only VEDNN_CONV_ALGORITHM_DIRECT is supported. */
if (algo == VEDNN_CONV_ALGORITHM_DIRECT)
{
// [todo] add variations
/* Tiny output plane (or smaller than the channel count): a kernel that
   vectorizes over channels wins. */
if ( pParamGradOut->height * pParamGradOut->width <= 16 ||
( pParamGradOut->height * pParamGradOut->width < 64
&& pParamGradOut->height * pParamGradOut->width < pParamIn->channel) ) {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_vecC,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
/* stride 1, dilation 1, output == input size => "same"-padding family. */
else if (pParamConv->strideHeight == 1 && pParamConv->strideWidth == 1
&& pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1
&& pParamIn->height == pParamGradOut->height
&& pParamIn->width == pParamGradOut->width )
{
if (pParamGradKernel->height == 1 && pParamGradKernel->width == 1)
{
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_padsame_ker1,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else if (pParamGradKernel->height == 3 && pParamGradKernel->width == 3)
{
/* 3x3: pick by output size, smallest specialization first. */
if( pParamGradOut->width * pParamGradOut->height <= 256)
{
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_padsame_ker3_ohwU256,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else if( pParamGradOut->width <= 128)
{
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_padsame_ker3_owU128,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else
{
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_padsame_ker3,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
}
else if (pParamGradKernel->height == 5 && pParamGradKernel->width == 5)
{
if( pParamGradOut->width <= 128)
{
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_padsame_ker5_owU128,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_padsame_ker5,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
}
else if (pParamGradKernel->height == 2 && pParamGradKernel->width == 2)
{
if( pParamGradOut->width <= 128 ) {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_padsame_ker2_owU128,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_padsame_ker2,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
}
else
{
/* "same" padding with any other kernel size. */
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_padsame,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
}
/* dilation 1, no padding, output matching a valid convolution => pad0 family. */
else if (pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1
&& pParamConv->padHeight == 0 && pParamConv->padWidth == 0
&& pParamGradOut->height == (pParamIn->height - pParamGradKernel->height) / pParamConv->strideHeight + 1
&& pParamGradOut->width == (pParamIn->width - pParamGradKernel->width) / pParamConv->strideWidth + 1 )
{
/* 3x3 stride 1 with even widths and 8-byte-aligned data pointers:
   fastest aligned/packed variant. */
if ( pParamGradKernel->height == 3 && pParamGradKernel->width == 3
&& pParamConv->strideHeight == 1 && pParamConv->strideWidth == 1
&& pParamIn->width <= 256
&& (pParamIn->width & 0x01) == 0 && (((uint64_t)pDataIn) & 0x07) == 0
&& (pParamGradOut->width & 0x01) == 0 && (((uint64_t)pDataGradOut) & 0x07) == 0 )
{
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_pad0_ker3_ow2X_iw2XU256_igoaligned,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else if (pParamGradOut->width <= 128 && pParamGradKernel->height == 3 && pParamGradKernel->width == 3 )
{
if( pParamConv->strideHeight == 1 && pParamConv->strideWidth == 1 ) {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_str1_pad0_ker3_owU128,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_pad0_ker3_owU128,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
}
else if (pParamGradKernel->height == 1 && pParamGradKernel->width == 1) {
/* 1x1 kernels: choose by output plane / width thresholds. */
if (pParamGradOut->height * pParamGradOut->width <= 64 ) {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_pad0_ker1_ohwU64,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else if (pParamGradOut->height * pParamGradOut->width <= 128 ) {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_pad0_ker1_ohwU128,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else if (pParamGradOut->width <= 32 ) {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_pad0_ker1_owU32,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_pad0_ker1,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
}
else if (pParamGradOut->width <= 32 ){
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_pad0_owU32,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_dil1_pad0,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
}
else {
/* General fallback kernels for arbitrary stride/dilation/padding. */
if (pParamGradOut->width <= 128)
{
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_owU128,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
else {
return vednnConvolutionBackwardFilter_wrapper(
vednnConvolutionBackwardFilter_direct_default,
pParamIn, pDataIn, pParamGradOut, pDataGradOut,
pParamConv, pParamGradKernel, pDataGradKernel );
}
}
}
else {
return VEDNN_ERROR_INVALID_PARAM ;
}
}
|
masked.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// GCC generates code that does not call the runtime for the master construct
// XFAIL: gcc
#include "callback.h"
#include <omp.h>
int main() {
// OMPT lit test: two threads enter the parallel region, only the master
// thread executes the masked (master) block, so x is incremented exactly
// once and no synchronization is needed. The print_* helpers (callback.h)
// record code addresses that the FileCheck patterns below match against
// the ompt_event_masked_begin/end callbacks.
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
// Address recorded here must fuzzily match the masked_begin codeptr_ra.
print_fuzzy_address(1);
x++;
}
// First statement after the construct: matches the masked_end codeptr_ra.
print_current_address(2);
}
printf("%" PRIu64 ": x=%d\n", ompt_get_thread_data()->value, x);
return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_masked'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_masked_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[TASK_ID:[0-9]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_masked_end:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS_END:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS_END]]
|
Par-07-ParallelOmpForNestedOmpParallelFor.c |
int main(int argc, char **argv) {
// Nested parallelism example: an outer worksharing loop over i with an
// inner "#pragma omp parallel for" over j inside each iteration.
int a[4] = {1,2,3,4};
int b[4] = {0, 0, 0, 0};
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 4; ++i) {
a[i] = 3*a[i];
// NOTE(review): different outer iterations i (running on different
// threads) each spawn an inner team that updates the shared b[j]
// without synchronization — a data race on b. Presumably intentional
// for this nested-parallelism test case; confirm before "fixing".
#pragma omp parallel for
for (int j = 0; j < 4; ++j) {
b[j] = b[j] + a[i];
}
}
}
return 0;
}
|
hillclimb.c | #define _POSIX_C_SOURCE 200112L
#define WIN32_LEAN_AND_MEAN
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#define HASHN 3 // number of multiplies in hash
#define SHIFT_RANGE 1 // radius of shift search
#define CONST_RANGE 2 // radius of const search
#define QUALITY 18 // 2^N iterations of estimate samples
#define THRESHOLD 1.95 // regenerate anything lower than this estimate
static int optind = 1;
static int opterr = 1;
static int optopt;
static char *optarg;
static int
getopt(int argc, char * const argv[], const char *optstring)
{
/* Minimal portable getopt replacement (POSIX-style). Returns the next
   option character, '?' on an unknown option or missing argument (':'
   for the latter when optstring begins with ':'), and -1 when the
   argument list is exhausted, "--" is seen, or a non-option is reached.
   State lives in the file-scope optind/opterr/optopt/optarg variables. */
static int optpos = 1; /* position within a clustered "-abc" argument */
const char *arg;
(void)argc;
/* Reset? */
if (optind == 0) {
optind = 1;
optpos = 1;
}
arg = argv[optind];
if (arg && strcmp(arg, "--") == 0) {
/* "--" terminates option parsing. */
optind++;
return -1;
} else if (!arg || arg[0] != '-' || !isalnum(arg[1])) {
return -1;
} else {
const char *opt = strchr(optstring, arg[optpos]);
optopt = arg[optpos];
if (!opt) {
if (opterr && *optstring != ':')
fprintf(stderr, "%s: illegal option: %c\n", argv[0], optopt);
return '?';
} else if (opt[1] == ':') {
/* Option takes an argument: either attached ("-oARG") or the
   following argv entry ("-o ARG"). */
if (arg[optpos + 1]) {
optarg = (char *)arg + optpos + 1;
optind++;
optpos = 1;
return optopt;
} else if (argv[optind + 1]) {
optarg = (char *)argv[optind + 1];
optind += 2;
optpos = 1;
return optopt;
} else {
if (opterr && *optstring != ':')
fprintf(stderr,
"%s: option requires an argument: %c\n",
argv[0], optopt);
return *optstring == ':' ? ':' : '?';
}
} else {
/* Flag option: advance within the cluster, or to the next argv. */
if (!arg[++optpos]) {
optind++;
optpos = 1;
}
return optopt;
}
}
}
#if defined(__unix__)
#include <sys/time.h>
uint64_t
uepoch(void)
{
    /* Microseconds since the Unix epoch. */
    struct timeval now;
    gettimeofday(&now, NULL);
    return (uint64_t)now.tv_sec * 1000000 + now.tv_usec;
}
#elif defined(_WIN32)
#include <windows.h>
uint64_t
uepoch(void)
{
/* Microseconds since the Unix epoch, on Windows. FILETIME counts
   100ns ticks since 1601-01-01. */
FILETIME ft;
GetSystemTimeAsFileTime(&ft);
uint64_t tt = ft.dwHighDateTime;
tt <<= 32;
tt |= ft.dwLowDateTime;
tt /=10; /* 100ns ticks -> microseconds */
tt -= UINT64_C(11644473600000000); /* 1601 -> 1970 epoch offset in us */
return tt;
}
#endif
static uint64_t
rand64(uint64_t s[4])
{
    /* One step of a xoshiro256**-style generator: the return value is a
       scrambled copy of s[1]; the 256-bit state is then advanced. */
    uint64_t scratch = s[1] * 5;
    uint64_t result = ((scratch << 7) | (scratch >> 57)) * 9;
    uint64_t shifted = s[1] << 17;
    s[2] ^= s[0];
    s[3] ^= s[1];
    s[1] ^= s[2];
    s[0] ^= s[3];
    s[2] ^= shifted;
    s[3] = (s[3] << 45) | (s[3] >> 19);
    return result;
}
struct hash {
uint32_t c[HASHN];
char s[HASHN + 1];
};
static void
hash_gen(struct hash *h, uint64_t rng[4])
{
    int i;
    /* Each multiplier comes from the high 32 bits of the PRNG, forced
       odd so the multiply stays invertible modulo 2^32. */
    for (i = 0; i < HASHN; i++)
        h->c[i] = (rand64(rng) >> 32) | 1u;
    /* Every shift, including the final one, starts at 16. */
    memset(h->s, 16, HASHN + 1);
}
static int
hash_equal(const struct hash *a, const struct hash *b)
{
    /* Two hash specs are equal iff all HASHN multipliers and all
       HASHN+1 shift amounts match. */
    return memcmp(a->c, b->c, sizeof(a->c)) == 0 &&
           memcmp(a->s, b->s, HASHN + 1) == 0;
}
static void
hash_print(const struct hash *h)
{
    int i;
    /* Render as "[s1 c1 s2 c2 ... sN]" — the same format hash_parse reads. */
    putchar('[');
    for (i = 0; i < HASHN; i++)
        printf("%2d %08lx ", h->s[i], (unsigned long)h->c[i]);
    printf("%2d]", h->s[HASHN]);
    fflush(stdout); /* keep progress visible even when stdout is a pipe */
}
/* Parse a pattern of the form "[s c s c ... s]" (as printed by
 * hash_print) into h. str is modified in place (strtok). Returns 1 on
 * success, 0 on a malformed pattern.
 *
 * Fix: strtok() returns NULL when a token is missing; the original
 * passed that NULL straight to strtol/strtoul (undefined behavior,
 * crash on truncated input such as "[16"). Each token is now checked. */
static int
hash_parse(struct hash *h, char *str)
{
    long s;
    unsigned long c;
    char *end, *tok;
    if (*str != '[')
        return 0;
    str++;
    for (int i = 0; i < HASHN; i++) {
        /* shift amount (decimal, 1..31) */
        tok = strtok(i ? 0 : str, " ");
        if (!tok)
            return 0;
        s = strtol(tok, &end, 10);
        if (s < 1 || s > 31 || !(*end == 0 || *end == ' '))
            return 0;
        h->s[i] = s;
        /* multiplier (hex, at most 32 bits) */
        tok = strtok(0, " ");
        if (!tok)
            return 0;
        c = strtoul(tok, &end, 16);
        if (c > 0xffffffffUL || !(*end == 0 || *end == ' '))
            return 0;
        h->c[i] = c;
    }
    /* final shift amount, terminated by ']' */
    tok = strtok(0, "]");
    if (!tok)
        return 0;
    s = strtol(tok, &end, 10);
    if (s < 1 || s > 31 || *end)
        return 0;
    h->s[HASHN] = s;
    return 1;
}
static uint32_t
hash(const struct hash *f_, uint32_t x)
{
    int round;
    /* HASHN xorshift-multiply rounds followed by a final xorshift. */
    for (round = 0; round < HASHN; round++) {
        x ^= x >> f_->s[round];
        x *= f_->c[round];
    }
    return x ^ (x >> f_->s[HASHN]);
}
static double
estimate_bias32(const struct hash *f, uint64_t rng[4])
{
    /* Monte-Carlo estimate of the avalanche bias of f: over 2^QUALITY
       random inputs, count how often flipping input bit j flips output
       bit k; an unbiased hash flips each output bit exactly half the
       time. Returns the RMS deviation scaled by 1000 (lower = better). */
    long n = 1L << QUALITY;
    long bins[32][32] = {{0}};
    long i;
    int j, k;
    for (i = 0; i < n; i++) {
        uint32_t x = rand64(rng);
        uint32_t h0 = hash(f, x);
        for (j = 0; j < 32; j++) {
            uint32_t flipped = hash(f, x ^ (UINT32_C(1) << j));
            uint32_t diffbits = h0 ^ flipped;
            for (k = 0; k < 32; k++)
                bins[j][k] += (diffbits >> k) & 1;
        }
    }
    double mean = 0;
    for (j = 0; j < 32; j++) {
        for (k = 0; k < 32; k++) {
            double diff = (bins[j][k] - n / 2) / (n / 2.0);
            mean += (diff * diff) / (32 * 32);
        }
    }
    return sqrt(mean) * 1000.0;
}
#define EXACT_SPLIT 32 // must be power of two
static double
exact_bias32(const struct hash *f)
{
/* Exact avalanche bias of f, computed over all 2^32 inputs. The input
   space is split into EXACT_SPLIT equal ranges processed in parallel;
   each thread accumulates into a private b[][] and merges into the
   shared bins[][] under a critical section. */
int i; // declare here to work around Visual Studio issue
long long bins[32][32] = {{0}};
static const uint64_t range = (UINT64_C(1) << 32) / EXACT_SPLIT;
#pragma omp parallel for
for (i = 0; i < EXACT_SPLIT; i++) {
long long b[32][32] = {{0}};
for (uint64_t x = i * range; x < (i + 1) * range; x++) {
uint32_t h0 = hash(f, x);
for (int j = 0; j < 32; j++) {
uint32_t bit = UINT32_C(1) << j;
uint32_t h1 = hash(f, x ^ bit);
uint32_t set = h0 ^ h1;
for (int k = 0; k < 32; k++)
b[j][k] += (set >> k) & 1;
}
}
#pragma omp critical
for (int j = 0; j < 32; j++)
for (int k = 0; k < 32; k++)
bins[j][k] += b[j][k];
}
double mean = 0.0;
for (int j = 0; j < 32; j++) {
for (int k = 0; k < 32; k++) {
/* 2147483648 = 2^31 = expected flip count for an unbiased bit. */
double diff = (bins[j][k] - 2147483648L) / 2147483648.0;
mean += (diff * diff) / (32 * 32);
}
}
return sqrt(mean) * 1000.0;
}
static void
hash_gen_strict(struct hash *h, uint64_t rng[4])
{
    /* Keep drawing random candidates until one passes the cheap
       estimated-bias screen (THRESHOLD). */
    for (;;) {
        hash_gen(h, rng);
        if (estimate_bias32(h, rng) <= THRESHOLD)
            break;
    }
}
static uint64_t
load64(const void *buf)
{
    /* Little-endian 64-bit load, independent of host byte order. */
    const unsigned char *p = (const unsigned char *)buf;
    uint64_t v = 0;
    int i;
    for (i = 7; i >= 0; i--)
        v = (v << 8) | p[i];
    return v;
}
static uint64_t
mix64(uint64_t x, uint64_t y)
{
    /* Combine two words with independent odd multipliers, then fold the
       high half down so the low bits depend on all input bits. */
    uint64_t acc = 0x2b8a130976726633 * x - 0xb28cbd28446adb17 * y;
    return acc ^ (acc >> 32);
}
static uint64_t
hash64(uint64_t x, uint64_t m)
{
    /* One multiply-xorshift round: multiply by m, then xor the high
       32 bits into the low 32. */
    uint64_t v = x * m;
    return v ^ (v >> 32);
}
static void
mix64x4(uint64_t x[4])
{
/* Scramble a 4-word seed in place: first whiten each word with a
   distinct additive constant and a stepped multiplier, then run four
   rounds where one word is hashed and mixed into the others. */
uint64_t i = 0xf81db9ba6dabee4e; /* multiplier increment per use */
uint64_t m = 0xb1d9e3fbc08321db;
x[0] = hash64(x[0] + 0x347534cdcf0982b6, m);
x[1] = hash64(x[1] + 0x975e2ee8f0f23aa8, m += i);
x[2] = hash64(x[2] + 0x7baf736c6c769a0b, m += i);
x[3] = hash64(x[3] + 0x884afc96accb90d9, m += i);
/* ROUND64(a, b, c, d): mix word a (freshly hashed with a new m each
   time) into words b, c and d. */
#define ROUND64(a, b, c, d) \
x[b] = mix64(hash64(x[a], m += i), x[b]); \
x[c] = mix64(hash64(x[a], m += i), x[c]); \
x[d] = mix64(hash64(x[a], m += i), x[d])
ROUND64(0, 1, 2, 3);
ROUND64(1, 0, 2, 3);
ROUND64(2, 0, 1, 3);
/* NOTE(review): the last round is (3, 0, 1, 3) — x[3] is remixed with
   itself and x[2] is not touched in this round. Possibly intended to be
   (3, 0, 1, 2); behavior kept as-is since this only seeds the PRNG —
   confirm against the upstream source before changing. */
ROUND64(3, 0, 1, 3);
#undef ROUND64
}
static void
rng_init(uint64_t rng[4])
{
    /* Gather cheap, non-cryptographic entropy — the clock plus three
       address-space values (code, stack, heap) that vary under ASLR —
       then scramble it all with mix64x4. */
    void *probe = malloc(1024L * 1024);
    rng[0] = uepoch();
    rng[1] = (uint64_t)rng_init;
    rng[2] = (uint64_t)rng;
    rng[3] = (uint64_t)probe;
    free(probe);
    mix64x4(rng);
}
/* Modular multiplicative inverse (32-bit) */
static uint32_t
modinv32(uint32_t x)
{
    /* Newton–Raphson inverse modulo 2^32; x must be odd. The guess
       x itself is correct to 3 low bits (x*x = 1 mod 8 for odd x) and
       each step x <- x*(2 - a*x) doubles the correct bits, so five
       steps reach well past 32 bits. */
    uint32_t a = x;
    int step;
    for (step = 0; step < 5; step++)
        x += x - a * x * x;
    return x;
}
/* Print command-line help to the given stream.
 * Fixes two user-facing text defects: "an quit" -> "and quit",
 * "a local minima" -> "a local minimum". */
static void
usage(FILE *f)
{
    fprintf(f, "usage: hillclimb [-EhIqs] [-p INIT] [-x SEED]\n");
    fprintf(f, " -E Evaluate given pattern (-p)\n");
    fprintf(f, " -h Print this message and exit\n");
    fprintf(f, " -I Invert given pattern (-p) and quit\n");
    fprintf(f, " -p INIT Provide an initial hash function\n");
    fprintf(f, " -q Print less information (quiet)\n");
    fprintf(f, " -s Quit after finding a local minimum\n");
    fprintf(f, " -x SEED Seed PRNG from a string (up to 32 bytes)\n");
}
/* Hill-climbing search for low-bias 32-bit hash functions.
 *
 * A candidate hash is a `struct hash` holding HASHN+1 shift amounts
 * (s[]) and HASHN multiplicative constants (c[]) -- presumably an
 * alternating xorshift/multiply construction; inferred from the -I
 * inversion code, confirm against struct hash's definition.  The main
 * loop evaluates every neighbor of the current pattern (each shift
 * perturbed by up to +/-SHIFT_RANGE, each constant by an even delta up
 * to +/-CONST_RANGE, keeping constants odd and thus invertible), moves
 * to the best-scoring neighbor, and restarts from a random pattern at
 * local minima unless -s was given.  Lower exact_bias32() is better.
 */
int
main(int argc, char **argv)
{
    int seeded = 0;              /* nonzero once -x seeded rng[] */
    uint64_t rng[4];
    struct hash cur, last = {0}; /* current pattern / previously accepted one */
    int generate = 1;            /* random start unless -p supplied */
    int one_shot = 0;            /* -s: stop at the first local minimum */
    int quiet = 0;               /* -q is repeatable; higher = quieter */
    int invert = 0;              /* -I: emit inverse function and exit */
    int evaluate = 0;            /* -E: print bias of -p pattern and exit */
    double cur_score = -1;       /* cached bias of cur; < 0 means recompute */
    int option;
    while ((option = getopt(argc, argv, "EhIp:qsx:")) != -1) {
        switch (option) {
        case 'E': {
            evaluate = 1;
        } break;
        case 'h': {
            usage(stdout);
            exit(EXIT_SUCCESS);
        } break;
        case 'I': {
            invert = 1;
        } break;
        case 'p': {
            if (!hash_parse(&cur, optarg)) {
                fprintf(stderr, "hillclimb: invalid pattern: %s\n", optarg);
                exit(EXIT_FAILURE);
            }
            generate = 0;
        } break;
        case 'q': {
            quiet++;
        } break;
        case 's': {
            one_shot = 1;
        } break;
        case 'x': {
            /* Seed from up to 32 bytes of user text, zero-padded, then
             * scrambled so short seeds still fill all four state words. */
            unsigned char buf[32] = {0};
            size_t len = strlen(optarg);
            if (len > sizeof(buf)) {
                fprintf(stderr, "hillclimb: seed too long (> 32 bytes)\n");
                exit(EXIT_FAILURE);
            }
            memcpy(buf, optarg, len);
            rng[0] = load64(buf + 0);
            rng[1] = load64(buf + 8);
            rng[2] = load64(buf + 16);
            rng[3] = load64(buf + 24);
            mix64x4(rng);
            seeded = 1;
        } break;
        default:
            usage(stderr);
            exit(EXIT_FAILURE);
        }
    }
    if (invert) {
        if (generate) {
            fprintf(stderr, "hillclimb: -I requires -p\n");
            exit(EXIT_FAILURE);
        }
        /* Emit C source for the inverse hash.  The forward hash
         * alternates xorshift steps (x ^= x >> s) with odd multiplies,
         * so the inverse applies, in reverse order, the xorshift
         * inverse (xor of all shifted copies x>>s, x>>2s, ... below 32
         * bits) and the modular inverse of each constant. */
        printf("uint32_t\nhash_r(uint32_t x)\n{\n");
        for (int i = 0; i < HASHN * 2 + 1; i++) {
            switch (i & 1) {
            case 0: { /* invert one xorshift step */
                int s = HASHN - i / 2;
                printf(" x ^=");
                /* note: this inner i deliberately shadows the loop counter */
                for (int i = cur.s[s]; i < 32; i += cur.s[s])
                    printf(" %sx >> %d", i == cur.s[s] ? "" : "^ ", i);
                printf(";\n");
            } break;
            case 1: { /* invert one multiplication */
                int c = HASHN - (i + 1) / 2;
                unsigned long inv = modinv32(cur.c[c]);
                printf(" x *= 0x%08lx;\n", inv);
            } break;
            }
        }
        printf(" return x;\n}\n");
        exit(EXIT_SUCCESS);
    }
    if (evaluate) {
        /* -E: just score the supplied pattern and exit. */
        if (generate) {
            fprintf(stderr, "hillclimb: -E requires -p\n");
            exit(EXIT_FAILURE);
        }
        hash_print(&cur);
        printf(" = %.17g\n", exact_bias32(&cur));
        exit(EXIT_SUCCESS);
    }
    if (!seeded)
        rng_init(rng);
    if (generate)
        hash_gen_strict(&cur, rng);
    for (;;) {
        int found = 0;          /* did any neighbor beat the current score? */
        struct hash best;
        double best_score;
        if (quiet < 2)
            hash_print(&cur);
        if (cur_score < 0)
            cur_score = exact_bias32(&cur); /* recompute only when stale */
        if (quiet < 2)
            printf(" = %.17g\n", cur_score);
        best = cur;
        best_score = cur_score;
        /* Explore around shifts */
        for (int i = 0; i <= HASHN; i++) {
            /* In theory the shift could drift above 31 or below 1, but
             * in practice it would never get this far since these would
             * be terrible hashes.
             */
            for (int d = -SHIFT_RANGE; d <= +SHIFT_RANGE; d++) {
                if (d == 0) continue;
                struct hash tmp = cur;
                tmp.s[i] += d;
                /* skip the pattern we just came from */
                if (hash_equal(&tmp, &last)) continue;
                if (quiet <= 0) {
                    printf("  ");
                    hash_print(&tmp);
                }
                double score = exact_bias32(&tmp);
                if (quiet <= 0)
                    printf(" = %.17g\n", score);
                if (score < best_score) {
                    best_score = score;
                    best = tmp;
                    found = 1;
                }
            }
        }
        /* Explore around constants (step 2 keeps each constant odd,
         * hence invertible mod 2^32) */
        for (int i = 0; i < HASHN; i++) {
            for (int d = -CONST_RANGE; d <= +CONST_RANGE; d += 2) {
                if (d == 0) continue;
                struct hash tmp = cur;
                tmp.c[i] += d;
                if (hash_equal(&tmp, &last)) continue;
                if (quiet <= 0) {
                    printf("  ");
                    hash_print(&tmp);
                }
                double score = exact_bias32(&tmp);
                if (quiet <= 0)
                    printf(" = %.17g\n", score);
                if (score < best_score) {
                    best_score = score;
                    best = tmp;
                    found = 1;
                }
            }
        }
        if (found) {
            /* Move to the lowest item found */
            if (quiet < 1)
                puts("CLIMB");
            last = cur;
            cur = best;
            cur_score = best_score;
        } else if (one_shot) {
            /* Hit local minima, exit */
            if (quiet < 1)
                puts("DONE");
            hash_print(&cur);
            printf(" = %.17g\n", cur_score);
            break;
        } else {
            /* Hit local minima, reset */
            if (quiet < 1)
                puts("RESET");
            hash_print(&cur);
            printf(" = %.17g\n", cur_score);
            last.s[0] = 0; // set to invalid
            hash_gen_strict(&cur, rng);
            cur_score = -1; /* force re-scoring of the fresh pattern */
        }
    }
}
|
convolutiondepthwise_5x5_packn_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise convolution, 5x5 kernel, stride 1, packn-packed fp16 layout with
// fp16 accumulation (RISC-V vector intrinsics).
//
// bottom_blob : input; one packn-wide channel group per Mat channel
// top_blob    : output, same packing; assumed already sized (valid outw/outh)
// kernel      : per-group weights, packn values per tap (25 * packn per row)
// _bias       : per-group bias, packn values per group; may be empty
// opt         : supplies the thread count for the per-group OpenMP loop
static void convdw5x5s1_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // packn = fp16 lanes per vector register: vlenb bytes / 2 bytes per __fp16
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    const __fp16* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        // broadcast zero when no bias tensor was provided
        vfloat16m1_t _bias0 = bias ? vle16_v_f16m1(bias + g * packn, vl) : vfmv_v_f_f16m1((__fp16)0.f, vl);
        const __fp16* k0 = kernel.row<const __fp16>(g);
        __fp16* outptr0 = out.row<__fp16>(0);
        __fp16* outptr1 = out.row<__fp16>(1);
        const Mat img0 = bottom_blob.channel(g);
        // six consecutive input rows feed two output rows at a time
        const __fp16* r0 = img0.row<const __fp16>(0);
        const __fp16* r1 = img0.row<const __fp16>(1);
        const __fp16* r2 = img0.row<const __fp16>(2);
        const __fp16* r3 = img0.row<const __fp16>(3);
        const __fp16* r4 = img0.row<const __fp16>(4);
        const __fp16* r5 = img0.row<const __fp16>(5);
        int i = 0;
        // Main loop: produce two output rows per iteration so the loads of the
        // shared input rows r1..r4 are reused between them.
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                vfloat16m1_t _sum0 = _bias0; // accumulator for output row i
                vfloat16m1_t _sum1 = _bias0; // accumulator for output row i+1
                // kernel row 0: contributes only to _sum0 (via r0)
                vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);
                vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl);
                vfloat16m1_t _k00 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k01 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k02 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k03 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k04 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k00, _r00, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k01, _r01, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k02, _r02, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k03, _r03, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k04, _r04, vl);
                // input row r1: kernel row 0 for _sum1, kernel row 1 for _sum0
                vfloat16m1_t _r10 = vle16_v_f16m1(r1, vl);
                vfloat16m1_t _r11 = vle16_v_f16m1(r1 + packn, vl);
                vfloat16m1_t _r12 = vle16_v_f16m1(r1 + packn * 2, vl);
                vfloat16m1_t _r13 = vle16_v_f16m1(r1 + packn * 3, vl);
                vfloat16m1_t _r14 = vle16_v_f16m1(r1 + packn * 4, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k00, _r10, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k01, _r11, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k02, _r12, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k03, _r13, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k04, _r14, vl);
                vfloat16m1_t _k10 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k11 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k12 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k13 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k14 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k10, _r10, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k11, _r11, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k12, _r12, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k13, _r13, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k14, _r14, vl);
                // input row r2: kernel row 1 for _sum1, kernel row 2 for _sum0
                vfloat16m1_t _r20 = vle16_v_f16m1(r2, vl);
                vfloat16m1_t _r21 = vle16_v_f16m1(r2 + packn, vl);
                vfloat16m1_t _r22 = vle16_v_f16m1(r2 + packn * 2, vl);
                vfloat16m1_t _r23 = vle16_v_f16m1(r2 + packn * 3, vl);
                vfloat16m1_t _r24 = vle16_v_f16m1(r2 + packn * 4, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k10, _r20, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k11, _r21, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k12, _r22, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k13, _r23, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k14, _r24, vl);
                vfloat16m1_t _k20 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k21 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k22 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k23 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k24 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k20, _r20, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k21, _r21, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k22, _r22, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k23, _r23, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k24, _r24, vl);
                // input row r3: kernel row 2 for _sum1, kernel row 3 for _sum0
                vfloat16m1_t _r30 = vle16_v_f16m1(r3, vl);
                vfloat16m1_t _r31 = vle16_v_f16m1(r3 + packn, vl);
                vfloat16m1_t _r32 = vle16_v_f16m1(r3 + packn * 2, vl);
                vfloat16m1_t _r33 = vle16_v_f16m1(r3 + packn * 3, vl);
                vfloat16m1_t _r34 = vle16_v_f16m1(r3 + packn * 4, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k20, _r30, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k21, _r31, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k22, _r32, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k23, _r33, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k24, _r34, vl);
                vfloat16m1_t _k30 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k31 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k32 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k33 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k34 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k30, _r30, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k31, _r31, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k32, _r32, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k33, _r33, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k34, _r34, vl);
                // input row r4: kernel row 3 for _sum1, kernel row 4 for _sum0
                vfloat16m1_t _r40 = vle16_v_f16m1(r4, vl);
                vfloat16m1_t _r41 = vle16_v_f16m1(r4 + packn, vl);
                vfloat16m1_t _r42 = vle16_v_f16m1(r4 + packn * 2, vl);
                vfloat16m1_t _r43 = vle16_v_f16m1(r4 + packn * 3, vl);
                vfloat16m1_t _r44 = vle16_v_f16m1(r4 + packn * 4, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k30, _r40, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k31, _r41, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k32, _r42, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k33, _r43, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k34, _r44, vl);
                vfloat16m1_t _k40 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k41 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k42 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k43 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k44 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 -= packn * 20; // rewind to kernel row 0 for the next output pixel
                _sum0 = vfmacc_vv_f16m1(_sum0, _k40, _r40, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k41, _r41, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k42, _r42, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k43, _r43, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k44, _r44, vl);
                // input row r5: kernel row 4 for _sum1 only
                vfloat16m1_t _r50 = vle16_v_f16m1(r5, vl);
                vfloat16m1_t _r51 = vle16_v_f16m1(r5 + packn, vl);
                vfloat16m1_t _r52 = vle16_v_f16m1(r5 + packn * 2, vl);
                vfloat16m1_t _r53 = vle16_v_f16m1(r5 + packn * 3, vl);
                vfloat16m1_t _r54 = vle16_v_f16m1(r5 + packn * 4, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k40, _r50, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k41, _r51, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k42, _r52, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k43, _r53, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k44, _r54, vl);
                vse16_v_f16m1(outptr0, _sum0, vl);
                vse16_v_f16m1(outptr1, _sum1, vl);
                outptr0 += packn;
                outptr1 += packn;
                // stride 1: advance every input row by one packed column
                r0 += packn;
                r1 += packn;
                r2 += packn;
                r3 += packn;
                r4 += packn;
                r5 += packn;
            }
            // Finish the current input row (4-column right apron of the 5x5
            // window) and skip one more full row: two input rows are consumed
            // per pair of output rows.
            r0 += 4 * packn + w * packn;
            r1 += 4 * packn + w * packn;
            r2 += 4 * packn + w * packn;
            r3 += 4 * packn + w * packn;
            r4 += 4 * packn + w * packn;
            r5 += 4 * packn + w * packn;
            // each pointer skips the row the other one just wrote
            // (assumes output rows are stored contiguously -- TODO confirm)
            outptr0 += outw * packn;
            outptr1 += outw * packn;
        }
        // Tail: remaining single output row (odd outh).
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                vfloat16m1_t _sum0 = _bias0;
                vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);
                vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl);
                vfloat16m1_t _k00 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k01 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k02 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k03 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k04 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k00, _r00, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k01, _r01, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k02, _r02, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k03, _r03, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k04, _r04, vl);
                vfloat16m1_t _r10 = vle16_v_f16m1(r1, vl);
                vfloat16m1_t _r11 = vle16_v_f16m1(r1 + packn, vl);
                vfloat16m1_t _r12 = vle16_v_f16m1(r1 + packn * 2, vl);
                vfloat16m1_t _r13 = vle16_v_f16m1(r1 + packn * 3, vl);
                vfloat16m1_t _r14 = vle16_v_f16m1(r1 + packn * 4, vl);
                vfloat16m1_t _k10 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k11 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k12 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k13 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k14 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k10, _r10, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k11, _r11, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k12, _r12, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k13, _r13, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k14, _r14, vl);
                vfloat16m1_t _r20 = vle16_v_f16m1(r2, vl);
                vfloat16m1_t _r21 = vle16_v_f16m1(r2 + packn, vl);
                vfloat16m1_t _r22 = vle16_v_f16m1(r2 + packn * 2, vl);
                vfloat16m1_t _r23 = vle16_v_f16m1(r2 + packn * 3, vl);
                vfloat16m1_t _r24 = vle16_v_f16m1(r2 + packn * 4, vl);
                vfloat16m1_t _k20 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k21 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k22 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k23 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k24 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k20, _r20, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k21, _r21, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k22, _r22, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k23, _r23, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k24, _r24, vl);
                vfloat16m1_t _r30 = vle16_v_f16m1(r3, vl);
                vfloat16m1_t _r31 = vle16_v_f16m1(r3 + packn, vl);
                vfloat16m1_t _r32 = vle16_v_f16m1(r3 + packn * 2, vl);
                vfloat16m1_t _r33 = vle16_v_f16m1(r3 + packn * 3, vl);
                vfloat16m1_t _r34 = vle16_v_f16m1(r3 + packn * 4, vl);
                vfloat16m1_t _k30 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k31 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k32 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k33 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k34 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k30, _r30, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k31, _r31, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k32, _r32, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k33, _r33, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k34, _r34, vl);
                vfloat16m1_t _r40 = vle16_v_f16m1(r4, vl);
                vfloat16m1_t _r41 = vle16_v_f16m1(r4 + packn, vl);
                vfloat16m1_t _r42 = vle16_v_f16m1(r4 + packn * 2, vl);
                vfloat16m1_t _r43 = vle16_v_f16m1(r4 + packn * 3, vl);
                vfloat16m1_t _r44 = vle16_v_f16m1(r4 + packn * 4, vl);
                vfloat16m1_t _k40 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k41 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k42 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k43 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k44 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 -= packn * 20; // rewind to kernel row 0 for the next output pixel
                _sum0 = vfmacc_vv_f16m1(_sum0, _k40, _r40, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k41, _r41, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k42, _r42, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k43, _r43, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k44, _r44, vl);
                vse16_v_f16m1(outptr0, _sum0, vl);
                outptr0 += packn;
                r0 += packn;
                r1 += packn;
                r2 += packn;
                r3 += packn;
                r4 += packn;
            }
            // skip the 4-column right apron to reach the next input row
            r0 += 4 * packn;
            r1 += 4 * packn;
            r2 += 4 * packn;
            r3 += 4 * packn;
            r4 += 4 * packn;
        }
    }
}
// Depthwise convolution, 5x5 kernel, stride 2, packn-packed fp16 layout with
// fp16 accumulation (RISC-V vector intrinsics).
//
// bottom_blob : input; one packn-wide channel group per Mat channel
// top_blob    : output, same packing; assumed already sized (valid outw/outh)
// kernel      : per-group weights, packn values per tap (25 * packn per row)
// _bias       : per-group bias, packn values per group; may be empty
// opt         : supplies the thread count for the per-group OpenMP loop
static void convdw5x5s2_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // packn = fp16 lanes per vector register: vlenb bytes / 2 bytes per __fp16
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    // after the strided walk over a row: (w - 2*outw) leftover columns, plus
    // one extra full row (stride 2 consumes two input rows per output row)
    const int tailstep = (w - 2 * outw + w) * packn;
    const __fp16* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        // broadcast zero when no bias tensor was provided
        vfloat16m1_t _bias0 = bias ? vle16_v_f16m1(bias + g * packn, vl) : vfmv_v_f_f16m1((__fp16)0.f, vl);
        const __fp16* k0 = kernel.row<const __fp16>(g);
        __fp16* outptr0 = out.row<__fp16>(0);
        const Mat img0 = bottom_blob.channel(g);
        // five consecutive input rows feed one output row
        const __fp16* r0 = img0.row<const __fp16>(0);
        const __fp16* r1 = img0.row<const __fp16>(1);
        const __fp16* r2 = img0.row<const __fp16>(2);
        const __fp16* r3 = img0.row<const __fp16>(3);
        const __fp16* r4 = img0.row<const __fp16>(4);
        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                vfloat16m1_t _sum0 = _bias0;
                // kernel row 0 x input row r0
                vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);
                vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl);
                vfloat16m1_t _k00 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k01 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k02 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k03 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k04 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k00, _r00, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k01, _r01, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k02, _r02, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k03, _r03, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k04, _r04, vl);
                // kernel row 1 x input row r1
                vfloat16m1_t _r10 = vle16_v_f16m1(r1, vl);
                vfloat16m1_t _r11 = vle16_v_f16m1(r1 + packn, vl);
                vfloat16m1_t _r12 = vle16_v_f16m1(r1 + packn * 2, vl);
                vfloat16m1_t _r13 = vle16_v_f16m1(r1 + packn * 3, vl);
                vfloat16m1_t _r14 = vle16_v_f16m1(r1 + packn * 4, vl);
                vfloat16m1_t _k10 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k11 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k12 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k13 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k14 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k10, _r10, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k11, _r11, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k12, _r12, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k13, _r13, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k14, _r14, vl);
                // kernel row 2 x input row r2
                vfloat16m1_t _r20 = vle16_v_f16m1(r2, vl);
                vfloat16m1_t _r21 = vle16_v_f16m1(r2 + packn, vl);
                vfloat16m1_t _r22 = vle16_v_f16m1(r2 + packn * 2, vl);
                vfloat16m1_t _r23 = vle16_v_f16m1(r2 + packn * 3, vl);
                vfloat16m1_t _r24 = vle16_v_f16m1(r2 + packn * 4, vl);
                vfloat16m1_t _k20 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k21 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k22 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k23 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k24 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k20, _r20, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k21, _r21, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k22, _r22, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k23, _r23, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k24, _r24, vl);
                // kernel row 3 x input row r3
                vfloat16m1_t _r30 = vle16_v_f16m1(r3, vl);
                vfloat16m1_t _r31 = vle16_v_f16m1(r3 + packn, vl);
                vfloat16m1_t _r32 = vle16_v_f16m1(r3 + packn * 2, vl);
                vfloat16m1_t _r33 = vle16_v_f16m1(r3 + packn * 3, vl);
                vfloat16m1_t _r34 = vle16_v_f16m1(r3 + packn * 4, vl);
                vfloat16m1_t _k30 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k31 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k32 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k33 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k34 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 += packn * 5;
                _sum0 = vfmacc_vv_f16m1(_sum0, _k30, _r30, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k31, _r31, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k32, _r32, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k33, _r33, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k34, _r34, vl);
                // kernel row 4 x input row r4
                vfloat16m1_t _r40 = vle16_v_f16m1(r4, vl);
                vfloat16m1_t _r41 = vle16_v_f16m1(r4 + packn, vl);
                vfloat16m1_t _r42 = vle16_v_f16m1(r4 + packn * 2, vl);
                vfloat16m1_t _r43 = vle16_v_f16m1(r4 + packn * 3, vl);
                vfloat16m1_t _r44 = vle16_v_f16m1(r4 + packn * 4, vl);
                vfloat16m1_t _k40 = vle16_v_f16m1(k0, vl);
                vfloat16m1_t _k41 = vle16_v_f16m1(k0 + packn, vl);
                vfloat16m1_t _k42 = vle16_v_f16m1(k0 + packn * 2, vl);
                vfloat16m1_t _k43 = vle16_v_f16m1(k0 + packn * 3, vl);
                vfloat16m1_t _k44 = vle16_v_f16m1(k0 + packn * 4, vl);
                k0 -= packn * 20; // rewind to kernel row 0 for the next output pixel
                _sum0 = vfmacc_vv_f16m1(_sum0, _k40, _r40, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k41, _r41, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k42, _r42, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k43, _r43, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k44, _r44, vl);
                vse16_v_f16m1(outptr0, _sum0, vl);
                outptr0 += packn;
                // stride 2: advance every input row by two packed columns
                r0 += packn * 2;
                r1 += packn * 2;
                r2 += packn * 2;
                r3 += packn * 2;
                r4 += packn * 2;
            }
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
            r3 += tailstep;
            r4 += tailstep;
        }
    }
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  // Computes res += alpha * lhs * rhs for a row-major result by delegating to
  // the column-major kernel on the transposed product: C = A*B is equivalent
  // to C^T = B^T * A^T, so lhs/rhs swap roles (note the swapped scalar order
  // in the blocking parameter) and each operand's storage order is flipped.
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  // Computes res += alpha * lhs * rhs (column-major result) with Goto-style
  // cache blocking: the depth dimension is cut into kc-sized panels, each rhs
  // panel is packed once into blockB, and mc-row slices of lhs are packed into
  // blockA and multiplied by the gebp kernel.  When `info` is non-null the
  // call is running inside an OpenMP parallel region and the rhs packing is
  // cooperatively split across threads (see inline comments).
  static void run(Index rows, Index cols, Index depth,
    const LhsScalar* _lhs, Index lhsStride,
    const RhsScalar* _rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar,RhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
    const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);

    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    //Index nc = blocking.nc(); // cache block size along the N direction

    gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      Index tid = omp_get_thread_num();
      Index threads = omp_get_num_threads();

      std::size_t sizeA = kc*mc;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;
      // blockA and the workspace are per-thread (stack/alloca); blockB is the
      // single shared buffer pre-allocated by the blocking object.
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
      ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);

      RhsScalar* blockB = blocking.blockB();
      eigen_internal_assert(blockB!=0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing A'.
        pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);

        // Pack B_k to B' in a parallel fashion:
        // each thread packs the sub block B_k,j to B'_j where j is the thread id.

        // However, before copying to B'_j, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        // NOTE: busy-wait spin loop; paired with the "#pragma omp atomic"
        // decrement at the bottom of this k-loop.
        while(info[tid].users!=0) {}
        info[tid].users += threads;

        pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);

        // Notify the other threads that the part B'_j is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per B'_j
        for(Index shift=0; shift<threads; ++shift)
        {
          // Visit the thread's own part first (shift==0), then the others in
          // round-robin order, to minimize waiting on slower packers.
          Index j = (tid+shift)%threads;

          // At this point we have to make sure that B'_j has been updated by the thread j,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if(shift>0)
            while(info[j].sync!=k) {}

          gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
        }

        // Then keep going as usual with the remaining A'
        for(Index i=mc; i<rows; i+=mc)
        {
          const Index actual_mc = (std::min)(i+mc,rows)-i;

          // pack A_i,k to A'
          pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

          // C_i += A' * B'
          gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
        }

        // Release all the sub blocks B'_j of B' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index j=0; j<threads; ++j)
        {
          #pragma omp atomic
          info[j].users -= 1;
        }
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*cols;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;

      // Reuse the blocking object's buffers when it pre-allocated them,
      // otherwise fall back to stack/heap scratch.
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      // (==GEMM_VAR1)
      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
        // Note that this panel will be read as many times as the number of blocks in the lhs's
        // vertical panel which is, in practice, a very low number.
        pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);

        // For each mc x kc block of the lhs's vertical panel...
        // (==GEPP_VAR1)
        for(Index i2=0; i2<rows; i2+=mc)
        {
          const Index actual_mc = (std::min)(i2+mc,rows)-i2;

          // We pack the lhs's block into a sequential chunk of memory (L1 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
          pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);

          // Everything is packed, we can now call the block * panel kernel:
          gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
        }
      }
    }
  }
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// The GEMM product expression simply inherits its traits from ProductBase.
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
// Callable wrapper that binds a GEMM call (operands, destination, scaling
// factor, blocking) so it can be dispatched either directly or, by the
// parallelizer, on a sub-range of rows/cols per thread.  Stores references
// only -- the functor must not outlive its operands.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha,
               BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Called once before a parallel run: pre-allocate the shared rhs buffer
  // so all threads can pack into it.
  void initParallelSession() const
  {
    m_blocking.allocateB();
  }

  // Run the product on the [row, row+rows) x [col, col+cols) sub-block of
  // the destination; cols == -1 means "all remaining columns".
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  protected:
  const Lhs& m_lhs;
  const Rhs& m_rhs;
  Dest& m_dest;
  Scalar m_actualAlpha;
  BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
// Base class holding the cache-blocking parameters (mc/nc/kc) and the packed
// buffer pointers shared by all level-3 BLAS-style kernels.  It does not own
// the buffers; derived gemm_blocking_space classes manage allocation.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;   // packed lhs block (mc x kc)
    RhsScalar* m_blockB;   // packed rhs panel (kc x nc)
    RhsScalar* m_blockW;   // kernel workspace
    DenseIndex m_mc;       // block size along the M (rows) direction
    DenseIndex m_nc;       // block size along the N (cols) direction
    DenseIndex m_kc;       // block size along the K (depth) direction

  public:
    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};
// Blocking for products whose dimensions are all fixed at compile time:
// buffers are statically-sized, aligned member arrays, so the allocate*()
// hooks are no-ops.  For a row-major destination the product is computed
// transposed, hence the lhs/rhs scalar and row/col swaps below.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Traits::WorkSpaceFactor
    };

    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];

  public:

    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      // No blocking needed: the whole (small) product fits in one block.
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }

    // Static storage: nothing to allocate.
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};
// Blocking for dynamically-sized products: block sizes are computed at run
// time from the actual dimensions, and the packed buffers are heap-allocated
// lazily via the allocate*() methods and freed in the destructor.
//
// NOTE(review): this class heap-allocates in allocate*() and frees in the
// destructor but declares no copy constructor/assignment, so copying an
// instance after allocation would double-free -- presumably instances are
// never copied; verify against callers.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    DenseIndex m_sizeA;  // element count of the packed lhs buffer
    DenseIndex m_sizeB;  // element count of the packed rhs buffer
    DenseIndex m_sizeW;  // element count of the kernel workspace

  public:

    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      // Shrink mc/nc/kc to cache-friendly sizes; updates them in place.
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateW()
    {
      if(this->m_blockW==0)
        this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }

    ~gemm_blocking_space()
    {
      // aligned_delete on a null pointer is a no-op for never-allocated buffers
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
      aligned_delete(this->m_blockW, m_sizeW);
    }
};
} // end namespace internal
/* GEMM-based evaluation of a general matrix * matrix product.
 * Extracts scalar factors and storage-order/conjugation info from both
 * operands, sets up a blocking strategy, and dispatches to the (possibly
 * parallel) packed kernel. */
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
  enum {
    // shared depth dimension of the product: lhs columns == rhs rows
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };
public:
  EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;
  typedef Scalar ResScalar;
  GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
  {
#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
    // fail at compile time when the two scalar types cannot be multiplied
    typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
    EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
#endif
  }
  // Computes dst += alpha * lhs * rhs.
  template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
  {
    eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
    // empty operands contribute nothing; skip the kernel entirely
    if(m_lhs.cols()==0 || m_lhs.rows()==0 || m_rhs.cols()==0)
      return;
    // strip transpose/conjugate/scalar-multiple wrappers from the operands
    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
    // fold both operands' scalar factors into a single alpha
    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                               * RhsBlasTraits::extractScalarFactor(m_rhs);
    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
    // only parallelize when the destination can be large enough to pay off
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
  }
};
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H |
flowinfo_metadata.c | /*
* Copyright 2014-2017 Nippon Telegraph and Telephone Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file flowinfo_metadata.c
* @brief Optimized flow database for dataplane, for metadata
*/
#include <stdlib.h>
#include "openflow.h"
#include "lagopus/flowdb.h"
#include "pktbuf.h"
#include "packet.h"
#include "lagopus/flowinfo.h"
#define OXM_FIELD_TYPE(field) ((field) >> 1)
#define METADATA_BITLEN (64)
static lagopus_result_t
add_flow_metadata_mask(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_metadata_mask(struct flowinfo *, struct flow *);
static struct flow *
match_flow_metadata_mask(struct flowinfo *, struct lagopus_packet *,
int32_t *);
static struct flow *
find_flow_metadata_mask(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_metadata_mask(struct flowinfo *);
static lagopus_result_t
add_flow_metadata(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_metadata(struct flowinfo *, struct flow *);
static struct flow *
match_flow_metadata(struct flowinfo *, struct lagopus_packet *, int32_t *);
static struct flow *
find_flow_metadata(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_metadata(struct flowinfo *);
/*
 * Extract the OXM METADATA match (value and mask) from @match_list.
 *
 * A masked METADATA match (HASMASK bit set, i.e. oxm_field ==
 * (OFPXMT_OFB_METADATA << 1) + 1) carries value then mask back to back in
 * oxm_value; an unmasked METADATA match implies an all-ones mask.
 *
 * @retval LAGOPUS_RESULT_OK        metadata match found; *metadata/*mask set.
 * @retval LAGOPUS_RESULT_NOT_FOUND no metadata match in the list.
 */
static lagopus_result_t
get_match_metadata(const struct match_list *match_list,
                   uint64_t *metadata,
                   uint64_t *mask) {
  const struct match *match;

  TAILQ_FOREACH(match, match_list, entry) {
    /* masked variant: low bit of oxm_field is the OXM HASMASK flag */
    if (match->oxm_field == (OFPXMT_OFB_METADATA << 1) + 1) {
      OS_MEMCPY(metadata, match->oxm_value, sizeof(*metadata));
      OS_MEMCPY(mask, &match->oxm_value[8], sizeof(*mask));
      break;
    }
    /* unmasked variant: match all 64 bits */
    if (OXM_FIELD_TYPE(match->oxm_field) == OFPXMT_OFB_METADATA) {
      OS_MEMCPY(metadata, match->oxm_value, sizeof(*metadata));
      *mask = 0xffffffffffffffff;
      break;
    }
  }
  /* TAILQ_FOREACH leaves match == NULL when the list was exhausted */
  if (match == NULL) {
    return LAGOPUS_RESULT_NOT_FOUND;
  }
  return LAGOPUS_RESULT_OK;
}
/*
 * Allocate the flowinfo node that dispatches flows by metadata mask.
 * Each distinct mask gets a child table in next[]; flows without a
 * metadata match fall through to the eth_type table in misc.
 *
 * @return new flowinfo, or NULL on allocation failure.
 */
struct flowinfo *
new_flowinfo_metadata_mask(void) {
  struct flowinfo *self;

  self = calloc(1, sizeof(struct flowinfo));
  if (self != NULL) {
    self->nflow = 0;  /* redundant after calloc; kept for clarity */
    self->nnext = 0;
    /* minimal non-NULL buffer; grown via realloc in add_flow_metadata_mask */
    self->next = malloc(1);
    /* NOTE(review): misc / next allocation results are not checked here —
       callers would crash on OOM; confirm acceptable for this code base. */
    self->misc = new_flowinfo_eth_type();
    self->add_func = add_flow_metadata_mask;
    self->del_func = del_flow_metadata_mask;
    self->match_func = match_flow_metadata_mask;
    self->find_func = find_flow_metadata_mask;
    self->destroy_func = destroy_flowinfo_metadata_mask;
  }
  return self;
}
/*
 * Destroy the metadata-mask flowinfo: tear down every per-mask child
 * table, the fallback (misc) table, and finally the node itself.
 */
static void
destroy_flowinfo_metadata_mask(struct flowinfo *self) {
  struct flowinfo *flowinfo;
  unsigned int i;

  for (i = 0; i < self->nnext; i++) {
    flowinfo = self->next[i];
    flowinfo->destroy_func(flowinfo);
  }
  free(self->next);
  /* bug fix: misc is allocated in new_flowinfo_metadata_mask() but was
     never released here, leaking the eth_type table on destroy */
  if (self->misc != NULL) {
    self->misc->destroy_func(self->misc);
  }
  free(self);
}
/*
 * lagopus_hashmap value destructor: recursively tears down the flowinfo
 * stored as a hash value (used by new_flowinfo_metadata()).
 */
static void
freeup_flowinfo(void *val) {
  struct flowinfo *fi = (struct flowinfo *)val;

  fi->destroy_func(fi);
}
/*
 * Allocate a per-metadata-value flowinfo: flows are hashed on their
 * (masked) metadata value; each hash value holds an eth_type child table.
 * Note: misc stays NULL (calloc) for this flowinfo kind.
 *
 * @return new flowinfo, or NULL on allocation failure.
 */
struct flowinfo *
new_flowinfo_metadata(void) {
  struct flowinfo *self;

  self = calloc(1, sizeof(struct flowinfo));
  if (self != NULL) {
    /* values are destroyed via freeup_flowinfo when entries are removed */
    lagopus_hashmap_create(&self->hashmap, LAGOPUS_HASHMAP_TYPE_ONE_WORD,
                           freeup_flowinfo);
    /* misc is not used */
    self->add_func = add_flow_metadata;
    self->del_func = del_flow_metadata;
    self->match_func = match_flow_metadata;
    self->find_func = find_flow_metadata;
    self->destroy_func = destroy_flowinfo_metadata;
  }
  return self;
}
/*
 * Destroy the per-metadata flowinfo.  Passing true frees every stored
 * value through freeup_flowinfo(), which destroys the child tables.
 */
static void
destroy_flowinfo_metadata(struct flowinfo *self) {
  lagopus_hashmap_destroy(&self->hashmap, true);
  free(self);
}
static lagopus_result_t
add_flow_metadata_mask(struct flowinfo *self, struct flow *flow) {
struct flowinfo *flowinfo;
uint64_t metadata, mask;
lagopus_result_t rv;
unsigned int i;
rv = get_match_metadata(&flow->match_list, &metadata, &mask);
if (rv == LAGOPUS_RESULT_OK) {
rv = LAGOPUS_RESULT_NOT_FOUND;
for (i = 0; i < self->nnext; i++) {
if (self->next[i]->userdata == mask) {
flowinfo = self->next[i];
rv = LAGOPUS_RESULT_OK;
break;
}
}
if (rv == LAGOPUS_RESULT_NOT_FOUND) {
/* new node. */
flowinfo = new_flowinfo_metadata();
flowinfo->userdata = mask;
self->next = realloc(self->next,
(unsigned long)(self->nnext + 1) *
sizeof(struct flowinfo *));
self->next[self->nnext] = flowinfo;
self->nnext++;
}
rv = flowinfo->add_func(flowinfo, flow);
} else {
rv = self->misc->add_func(self->misc, flow);
}
if (rv == LAGOPUS_RESULT_OK) {
self->nflow++;
}
return rv;
}
/*
 * Delete a flow from the metadata-mask dispatcher, mirroring
 * add_flow_metadata_mask().  An emptied child table is destroyed and
 * removed from next[].
 *
 * @retval LAGOPUS_RESULT_OK         flow removed; nflow decremented.
 * @retval LAGOPUS_RESULT_NOT_FOUND  no child table for the flow's mask.
 * @retval other                     propagated from the child del_func.
 */
static lagopus_result_t
del_flow_metadata_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint64_t metadata, mask;
  lagopus_result_t rv;
  unsigned int i;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = LAGOPUS_RESULT_NOT_FOUND;
    for (i = 0; i < self->nnext; i++) {
      if (self->next[i]->userdata == mask) {
        flowinfo = self->next[i];
        rv = LAGOPUS_RESULT_OK;
        break;
      }
    }
    if (rv == LAGOPUS_RESULT_NOT_FOUND) {
      return LAGOPUS_RESULT_NOT_FOUND;
    }
    rv = flowinfo->del_func(flowinfo, flow);
    if (flowinfo->nflow == 0) {
      /* child table is now empty: destroy it and close the gap */
      flowinfo->destroy_func(flowinfo);
      self->nnext--;
      /* type fix: element size is sizeof(struct flowinfo *), not
         sizeof(struct flowinfo **) — same value, correct intent */
      memmove(&self->next[i], &self->next[i + 1],
              (self->nnext - i) * sizeof(struct flowinfo *));
    }
  } else {
    rv = self->misc->del_func(self->misc, flow);
  }
  if (rv == LAGOPUS_RESULT_OK) {
    self->nflow--;
  }
  return rv;
}
/*
 * Match a packet against every per-mask child table and the fallback
 * (misc) table; return the best matching flow, or NULL.
 */
static struct flow *
match_flow_metadata_mask(struct flowinfo *self, struct lagopus_packet *pkt,
                         int32_t *pri) {
  struct flowinfo *flowinfo;
  /* VLA: one candidate result per child table (filled by the first loop) */
  struct flow *flow[self->nnext], *matched, *alt_flow;
  /* priority-0 sentinel so the comparison loop needs no NULL check */
  struct flow mismatched = {
    .priority = 0,
    .flags = 0,
    .idle_timeout = 0,
    .hard_timeout = 0,
    .match_list = {NULL, NULL},
    .instruction_list = {NULL, NULL},
    .field_bits = 0
  };
  unsigned int i;

  matched = &mismatched;
  /* matching and reduction are split into two loops; the first was
     apparently meant to be parallelizable (pragma left disabled) */
  //#pragma omp parallel for
  for (i = 0; i < self->nnext; i++) {
    flowinfo = self->next[i];
    flow[i] = flowinfo->match_func(flowinfo, pkt, pri);
  }
  for (i = 0; i < self->nnext; i++) {
    if (flow[i] != NULL && flow[i]->priority > matched->priority) {
      matched = flow[i];
    }
  }
  /* NOTE(review): the misc result overrides unconditionally — presumably
     match_func only returns flows beating *pri; confirm with callers. */
  alt_flow = self->misc->match_func(self->misc, pkt, pri);
  if (alt_flow != NULL) {
    matched = alt_flow;
  }
  if (matched == &mismatched) {
    matched = NULL;
  }
  return matched;
}
/*
 * Locate the flow entry equal to @flow: pick the child table whose mask
 * matches the flow's metadata mask (or the misc fallback when the flow
 * carries no metadata match) and delegate the lookup to it.
 */
static struct flow *
find_flow_metadata_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *child = NULL;
  uint64_t metadata, mask;
  unsigned int i;

  if (get_match_metadata(&flow->match_list, &metadata, &mask) !=
      LAGOPUS_RESULT_OK) {
    /* no metadata match in the flow: use the fallback table */
    return self->misc->find_func(self->misc, flow);
  }
  /* linear scan for the child table registered for this mask */
  for (i = 0; i < self->nnext; i++) {
    if (self->next[i]->userdata == mask) {
      child = self->next[i];
      break;
    }
  }
  if (child == NULL) {
    return NULL;
  }
  return child->find_func(child, flow);
}
/*
 * Add a flow to the per-metadata-value hash table.  A child eth_type
 * table is created on demand for a previously unseen metadata value.
 * Flows without a metadata match are rejected with the result of
 * get_match_metadata() (misc is not used by this flowinfo kind).
 *
 * @retval LAGOPUS_RESULT_OK  flow added; nflow incremented.
 * @retval other              lookup/insert/add error, propagated.
 */
static lagopus_result_t
add_flow_metadata(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint64_t metadata, mask;
  lagopus_result_t rv;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                      (void *)metadata, (void *)&flowinfo);
    if (rv != LAGOPUS_RESULT_OK) {
      void *val;

      flowinfo = new_flowinfo_eth_type();
      val = flowinfo;
      rv = lagopus_hashmap_add_no_lock(&self->hashmap, (void *)metadata,
                                       (void *)&val, false);
      if (rv != LAGOPUS_RESULT_OK) {
        /* leak fix: the hashmap did not take ownership on failure,
           so release the freshly created child table */
        flowinfo->destroy_func(flowinfo);
        goto out;
      }
    }
    rv = flowinfo->add_func(flowinfo, flow);
    if (rv == LAGOPUS_RESULT_OK) {
      self->nflow++;
    }
  }
out:
  return rv;
}
/*
 * Delete a flow from the per-metadata-value hash table.
 *
 * NOTE(review): unlike del_flow_metadata_mask(), an emptied child table
 * is not removed from the hashmap here; it lingers until destroy —
 * confirm this is intentional.
 */
static lagopus_result_t
del_flow_metadata(struct flowinfo *self, struct flow *flow) {
  uint64_t metadata, mask;
  lagopus_result_t rv;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    struct flowinfo *flowinfo;

    rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)metadata,
                                      (void *)&flowinfo);
    if (rv == LAGOPUS_RESULT_OK) {
      rv = flowinfo->del_func(flowinfo, flow);
    }
    if (rv == LAGOPUS_RESULT_OK) {
      self->nflow--;
    }
  }
  return rv;
}
/*
 * Match a packet against the per-metadata-value table: mask the packet's
 * metadata with this table's mask (stored in userdata), look the result
 * up in the hashmap, and delegate to the child table when found.
 *
 * @return matching flow, or NULL.
 */
static struct flow *
match_flow_metadata(struct flowinfo *self, struct lagopus_packet *pkt,
                    int32_t *pri) {
  struct flowinfo *child;
  uint64_t key;

  key = pkt->oob_data.metadata & self->userdata;
  if (lagopus_hashmap_find_no_lock(&self->hashmap,
                                   (void *)key,
                                   (void *)&child) != LAGOPUS_RESULT_OK) {
    return NULL;
  }
  return child->match_func(child, pkt, pri);
}
/*
 * Locate the flow entry equal to @flow in the per-metadata-value table.
 *
 * Bug fix: for this flowinfo kind self->misc is never allocated (see
 * new_flowinfo_metadata(): "misc is not used", calloc leaves it NULL), so
 * the old fallback `flowinfo = self->misc` dereferenced NULL for flows
 * without a metadata match.  Such flows are never stored here either
 * (add_flow_metadata rejects them), so the correct answer is NULL.
 *
 * @return matching stored flow, or NULL.
 */
static struct flow *
find_flow_metadata(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint64_t metadata, mask;
  lagopus_result_t rv;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv != LAGOPUS_RESULT_OK) {
    /* no metadata match: this table cannot contain the flow */
    return NULL;
  }
  rv = lagopus_hashmap_find_no_lock(&self->hashmap,
                                    (void *)metadata,
                                    (void *)&flowinfo);
  if (rv != LAGOPUS_RESULT_OK) {
    return NULL;
  }
  return flowinfo->find_func(flowinfo, flow);
}
|
6623.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
/* Array initialization: x[i] = i*pi and A[i][j] = i*(j+1)/nx. */
static
void init_array (int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  int row, col;

  /* vector operand: multiples of pi */
  for (col = 0; col < ny; col++) {
    x[col] = col * M_PI;
  }
  /* matrix operand: deterministic rational pattern */
  for (row = 0; row < nx; row++) {
    for (col = 0; col < ny; col++) {
      A[row][col] = ((DATA_TYPE) row*(col+1)) / nx;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Dumps y to stderr, inserting a newline every 20 values. */
static
void print_array(int nx,
                 DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
  int idx;

  for (idx = 0; idx < nx; idx++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, y[idx]);
    if (idx % 20 == 0) {
      fprintf (stderr, "\n");
    }
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: y = A^T * (A * x).
   The whole function will be timed, including the call and return. */
static
void kernel_atax(int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny),
                 DATA_TYPE POLYBENCH_1D(y,NY,ny),
                 DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;

#pragma scop
  {
    /* bug fix: the original bare "#pragma omp" carried no directive and
       was ignored by compilers; the zeroing iterations are independent,
       so a proper parallel-for is valid here. */
#pragma omp parallel for
    for (i = 0; i < _PB_NY; i++)
      {
        y[i] = 0;
      }
    /* tmp = A*x, then y += A^T*tmp.  Kept serial: parallelizing over i
       would race on y[j] without a reduction/atomic (the second bare
       "#pragma omp" was a no-op anyway). */
    for (i = 0; i < _PB_NX; i++)
      {
        tmp[i] = 0;
        for (j = 0; j < _PB_NY; j++)
          tmp[i] = tmp[i] + A[i][j] * x[j];
        for (j = 0; j < _PB_NY; j++)
          y[j] = y[j] + A[i][j] * tmp[i];
      }
  }
#pragma endscop
}
/* Driver: allocate operands, initialize, time kernel_atax, and print the
 * live-out vector y to defeat dead-code elimination. */
int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time constants from atax.h). */
  int nx = NX;
  int ny = NY;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
  POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);

  /* Initialize array(s). */
  init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_atax (nx, ny,
               POLYBENCH_ARRAY(A),
               POLYBENCH_ARRAY(x),
               POLYBENCH_ARRAY(y),
               POLYBENCH_ARRAY(tmp));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(x);
  POLYBENCH_FREE_ARRAY(y);
  POLYBENCH_FREE_ARRAY(tmp);

  return 0;
}
|
time_dgemm.c | /**
*
* @generated d Tue Jan 7 11:45:23 2014
*
**/
#define _TYPE double
#define _PREC double
#define _LAMCH LAPACKE_dlamch_work
#define _NAME "PLASMA_dgemm"
/* See Lawn 41 page 120 */
#define _FMULS FMULS_GEMM(M, N, K)
#define _FADDS FADDS_GEMM(M, N, K)
#include "./timing.c"
/*
 * Timing driver for PLASMA_dgemm: allocates and registers the matrices,
 * fills them with random data, times C = alpha*A*B + beta*C, and (when
 * check is set) measures the residual against a saved copy of C.
 * Always returns 0; timing and residuals are reported via t_/dparam.
 */
static int
RunTest(int *iparam, double *dparam, real_Double_t *t_)
{
  double alpha, beta;
  PASTE_CODE_IPARAM_LOCALS( iparam );
  /* leading dimensions must cover the matrix heights */
  LDB = max(K, iparam[IPARAM_LDB]);
  LDC = max(M, iparam[IPARAM_LDC]);

  /* Allocate Data */
  PASTE_CODE_ALLOCATE_MATRIX( A, 1, double, LDA, K );
#pragma omp register([LDA*K]A)
  PASTE_CODE_ALLOCATE_MATRIX( B, 1, double, LDB, N );
#pragma omp register([LDB*N]B)
  PASTE_CODE_ALLOCATE_MATRIX( C, 1, double, LDC, N );
#pragma omp register([LDC*N]C)
  /* C2 keeps the pre-gemm C for the residual check (allocated iff check) */
  PASTE_CODE_ALLOCATE_MATRIX( C2, check, double, LDC, N );

  /* NOTE(review): initialization and checking run under the QUARK runtime
     with a single worker, then OmpSs settings are restored around the
     timed call — presumably PLASMA_dplrnt requires QUARK; confirm. */
  int runtime = RT_get_runtime();
  int ws = RT_get_ws();
  if ( runtime == PLASMA_OMPSS ) {
    PLASMA_Set(PLASMA_RUNTIME_MODE, PLASMA_QUARK);
    RT_set_ws(1);
  }

  /* random fills with fixed seeds for reproducibility */
  PLASMA_dplrnt( M, K, A, LDA, 453 );
  PLASMA_dplrnt( K, N, B, LDB, 5673 );
  PLASMA_dplrnt( M, N, C, LDC, 740 );
  LAPACKE_dlarnv_work(1, ISEED, 1, &alpha);
  LAPACKE_dlarnv_work(1, ISEED, 1, &beta );

  if (check)
  {
      /* snapshot C before it is overwritten by the timed gemm */
      memcpy(C2, C, LDC*N*sizeof(double));
  }

  if ( runtime == PLASMA_OMPSS ) {
      PLASMA_Set(PLASMA_RUNTIME_MODE, PLASMA_OMPSS);
      RT_set_ws(ws);
  }

  START_TIMING();
  PLASMA_dgemm( PlasmaNoTrans, PlasmaNoTrans, M, N, K, alpha, A, LDA, B, LDB, beta, C, LDC );
  STOP_TIMING();

  if ( runtime == PLASMA_OMPSS ) {
      PLASMA_Set(PLASMA_RUNTIME_MODE, PLASMA_QUARK);
      RT_set_ws(1);
  }

  /* Check the solution */
  if (check)
  {
      dparam[IPARAM_RES] = d_check_gemm( PlasmaNoTrans, PlasmaNoTrans, M, N, K,
                                         alpha, A, LDA, B, LDB, beta, C, C2, LDC,
                                         &(dparam[IPARAM_ANORM]),
                                         &(dparam[IPARAM_BNORM]),
                                         &(dparam[IPARAM_XNORM]));
      /* NOTE(review): frees below are intentionally commented out —
         presumably registered memory must outlive the runtime; confirm. */
      // free(C2);
  }
  // free( A );
  // free( B );
  // free( C );

  if ( runtime == PLASMA_OMPSS ) {
      PLASMA_Set(PLASMA_RUNTIME_MODE, PLASMA_OMPSS);
      RT_set_ws(ws);
  }

  return 0;
}
|
generator_spgemm_csc_bsparse.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of GemmCodeGenerator.
*
* @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors)
*
* @section LICENSE
* Copyright (c) 2012-2014, Technische Universitaet Muenchen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* <DESCRIPTION>
*/
#include "generator_spgemm_csc_bsparse.h"
#include "generator_common.h"
#include "libxsmm_main.h"
/*
 * Emit C source (appended to io_generated_code as strings) for a sparse
 * GEMM kernel where B is sparse in CSC format: for every stored B(k,n)
 * it generates a dense column update C(:,n) += A(:,k) * B(k,n), guarded
 * by architecture-specific vectorization pragmas.
 *
 * @param io_generated_code  output code buffer (string mode).
 * @param i_xgemm_desc       GEMM shape/flags descriptor (m, n, k, ldc, ...).
 * @param i_arch             target architecture string ("noarch".."cpx").
 * @param i_row_idx          CSC row indices of B's nonzeros.
 * @param i_column_idx       CSC column pointers of B (length n+1).
 * @param i_values           nonzero values of B (unused here).
 */
LIBXSMM_API_INTERN
void libxsmm_generator_spgemm_csc_bsparse( libxsmm_generated_code*        io_generated_code,
                                           const libxsmm_gemm_descriptor* i_xgemm_desc,
                                           const char*                    i_arch,
                                           const unsigned int*            i_row_idx,
                                           const unsigned int*            i_column_idx,
                                           const double*                  i_values ) {
  unsigned int l_n;
  unsigned int l_z;
  unsigned int l_column_elements;
  /* flops per generated statement, scaled by m at the end */
  unsigned int l_flop_count = 0;

  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;

  LIBXSMM_UNUSED(i_values);

  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  unsigned int l_m = 0;\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* reset C if beta is zero */
  if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  unsigned int l_n = 0;\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* vectorize the zeroing loop only when m > 1 */
    if ( i_xgemm_desc->m > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    #pragma simd\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    #pragma vector aligned\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
    /* 0.0 vs 0.0f literal depends on the descriptor's precision */
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_m = 0; l_m < %u; l_m++) { C[(l_n*%u)+l_m] = 0.0; }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    } else {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_m = 0; l_m < %u; l_m++) { C[(l_n*%u)+l_m] = 0.0f; }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    }
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  }\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }

  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* determine the correct simd pragma for each architecture */
  if ( ( strcmp( i_arch, "noarch" ) == 0 ) ||
       ( strcmp( i_arch, "wsm" ) == 0 )    ||
       ( strcmp( i_arch, "snb" ) == 0 )    ||
       ( strcmp( i_arch, "hsw" ) == 0 ) ) {
    /* pick the largest vector length not exceeding m */
    if ( i_xgemm_desc->m > 7 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(8)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else if ( i_xgemm_desc->m > 3 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(4)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else if ( i_xgemm_desc->m > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(2)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else {}
    /* aligned loads/stores only when both A and C are declared aligned */
    if ( (i_xgemm_desc->m > 1)                                      &&
         ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0)   &&
         ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma vector aligned\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
  } else if ( ( strcmp( i_arch, "knl" ) == 0 ) ||
              ( strcmp( i_arch, "skx" ) == 0 ) ||
              ( strcmp( i_arch, "clx" ) == 0 ) ||
              ( strcmp( i_arch, "cpx" ) == 0 ) ) {
    if ( (i_xgemm_desc->m > 1)                                      &&
         ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0)   &&
         ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  #pragma simd vectorlength(32)\n  #pragma vector aligned\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
  } else {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH );
    return;
  }

  /* generate the actual kernel */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  /* walk B column by column; each stored nonzero yields one FMA statement */
  for ( l_n = 0; l_n < (unsigned int)i_xgemm_desc->n; l_n++ ) {
    l_column_elements = i_column_idx[l_n+1] - i_column_idx[l_n];
    for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
      /* check k such that we just use rows which actually need to be multiplied */
      if ( i_row_idx[i_column_idx[l_n] + l_z] < (unsigned int)i_xgemm_desc->k ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    C[%u+l_m] += A[%u+l_m] * B[%u];\n", l_n * i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_n] + l_z]*i_xgemm_desc->lda, i_column_idx[l_n] + l_z);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_flop_count += 2;
      }
    }
  }
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  }\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* add flop counter */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * (unsigned int)i_xgemm_desc->m);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
|
taskdep_tied_threadid.c | // RUN: %libomp-compile-and-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Sequential reference: number of monotone lattice paths from (0,0) to
 * (n-1,n-1) moving only right/down, i.e. C(2(n-1), n-1).
 *
 * Improvements over the original: O(n) memory instead of O(n^2) by
 * keeping a single DP row; malloc result checked; n <= 0 no longer reads
 * out of bounds (returns 0).
 *
 * @param n  grid dimension (results overflow int for n >= ~18).
 * @return   path count, 0 for n <= 0, or -1 on allocation failure.
 */
int calc_seq(int n) {
  int i, j, ret;
  int *row;

  if (n <= 0) {
    return 0;
  }
  row = (int *)malloc(sizeof(int) * n);
  if (row == NULL) {
    return -1;
  }
  /* first DP row: every cell in row 0 has exactly one path */
  for (j = 0; j < n; j++) {
    row[j] = 1;
  }
  /* each later row is the running prefix sum of the previous one:
     row[j] = row_above[j] + row[j-1]; row[0] stays 1 (first column) */
  for (i = 1; i < n; i++) {
    for (j = 1; j < n; j++) {
      row[j] += row[j - 1];
    }
  }
  ret = row[n - 1];
  free(row);
  return ret;
}
/*
 * Verifies that a tied task stays on the same OpenMP thread and the same
 * Argobots ULT across two yield points (taskyield and ABT_thread_yield).
 * Each successful check adds a distinct bit into vals[_val_index]
 * (+1, +2, +4); the caller expects the final value 7.
 * Note: comments must stay outside the macro — a // comment before a
 * line-continuation backslash would swallow the rest of the body.
 */
#define TASK_TIED_CHECK(_val_index) \
do { \
int val_index = (_val_index); \
int omp_thread_id = omp_get_thread_num(); \
ABT_thread abt_thread; \
ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread)); \
\
_Pragma("omp taskyield") \
\
int omp_thread_id2 = omp_get_thread_num(); \
if (omp_thread_id == omp_thread_id2) { \
vals[val_index] += 1; \
} \
ABT_thread abt_thread2; \
ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2)); \
ABT_bool abt_thread_equal; \
ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2, \
&abt_thread_equal)); \
if (abt_thread_equal == ABT_TRUE) { \
vals[val_index] += 2; \
} \
\
ABT_EXIT_IF_FAIL(ABT_thread_yield()); \
\
int omp_thread_id3 = omp_get_thread_num(); \
if (omp_thread_id2 == omp_thread_id3) { \
vals[val_index] += 4; \
} \
} while (0)
/*
 * Builds an n*n grid of tasks with the dependence pattern
 * A[i][j] = A[i-1][j] + A[i][j-1] (path-counting recurrence), running
 * TASK_TIED_CHECK inside each task.  Passes (returns 1) when the task
 * result matches calc_seq(n) and every task observed tied behavior
 * (vals[index] == 7); returns 0 on any mismatch.
 */
int test_taskdep_tied_threadid(int num_threads) {
  int n = 10;
  int seq_val, task_val;
  int vals[n * n];
  memset(vals, 0, sizeof(int) * n * n);
  /* single producer (master) creates all tasks; dependencies order them */
#pragma omp parallel shared(task_val) firstprivate(n) num_threads(num_threads)
#pragma omp master
  {
    int i, j;
    int *A_buf = (int *)malloc(sizeof(int) * n * n);
    int **A = (int **)malloc(sizeof(int *) * n);
    for(i = 0; i < n; i++) {
      A[i] = A_buf + (i * n);
      for(j = 0; j < n; j++) {
        // Assign random values.
        A[i][j] = i * n + j;
      }
    }
    // A[i][j] is the root task.
    for(i = 0; i < n; i++) {
      for(j = 0; j < n; j++) {
        if (i == 0 && j == 0) {
          /* root: no inputs */
#pragma omp task depend(out:A[i][j]) firstprivate(A, i, j)
          {
            TASK_TIED_CHECK(i * n + j);
            A[i][j] = 1;
          }
        } else if (i == 0) {
          /* first row: depends on the left neighbor only */
#pragma omp task depend(in:A[i][j - 1]) depend(out:A[i][j]) \
    firstprivate(A, i, j)
          {
            TASK_TIED_CHECK(i * n + j);
            A[i][j] = A[i][j - 1];
          }
        } else if (j == 0) {
          /* first column: depends on the upper neighbor only */
#pragma omp task depend(in:A[i - 1][j]) depend(out:A[i][j]) \
    firstprivate(A, i, j)
          {
            TASK_TIED_CHECK(i * n + j);
            A[i][j] = A[i - 1][j];
          }
        } else {
          /* interior: depends on both neighbors */
#pragma omp task depend(in:A[i - 1][j], A[i][j - 1]) \
    depend(out:A[i][j])
          {
            TASK_TIED_CHECK(i * n + j);
            A[i][j] = A[i - 1][j] + A[i][j - 1];
          }
        }
      }
    }
    /* wait for the whole task graph before reading the result */
#pragma omp taskwait
    task_val = A[n - 1][n - 1];
    free(A);
    free(A_buf);
  }
  seq_val = calc_seq(n);
  if(seq_val != task_val) {
    printf("[%d] Failed: route(%d) = %d (ANS = %d)\n", num_threads, n, task_val,
           seq_val);
    return 0;
  }
  /* every task must have scored all three tiedness checks (1+2+4) */
  int index;
  for (index = 0; index < n * n; index++) {
    if (vals[index] != 7) {
      printf("vals[%d] == %d\n", index, vals[index]);
      return 0;
    }
  }
  return 1;
}
/* Runs the tiedness test REPETITIONS times with an increasing thread
 * count (1..REPETITIONS); exit status is the number of failing runs. */
int main() {
  int i;
  int num_failed = 0;
  for (i = 0; i < REPETITIONS; i++) {
    if (!test_taskdep_tied_threadid(i + 1)) {
      num_failed++;
    }
  }
  return num_failed;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT (Y is normalized in place as scratch).
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y upward so x->tv_usec - y->tv_usec is non-negative. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Normalize y downward so the microsecond difference stays < 1e6. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* Compute the remaining difference; tv_usec is now non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),floord(16*t2+Ny+12,8)),floord(16*t1-16*t2+Nz+Ny+11,8));t3++) {
for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-124,128)),ceild(8*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(8*t1+Nx+13,128)),floord(16*t2+Nx+12,128)),floord(8*t3+Nx+4,128)),floord(16*t1-16*t2+Nz+Nx+11,128));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),8*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),128*t4+126),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(128*t4,t5+1);
ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
Psatd.h | #pragma once
#include "Constants.h"
#include "FieldSolver.h"
#include "Grid.h"
#include "Vectors.h"
#include "PmlPsatd.h"
//#include <chrono>
#include <omp.h>
namespace pfc {
// Pseudo-Spectral Analytical Time-Domain (PSATD) solver with a time-
// staggered B field: updateFields() advances B in two half steps around
// the E update. The ifPoisson flag selects an updateE() specialization
// that keeps E purely transverse (k . E = 0); see the template<> below.
// NOTE(review): "Straggered" looks like a typo for "Staggered", but the
// name is part of the public interface, so it is left unchanged.
template <bool ifPoisson>
class PSATDTimeStraggeredT : public SpectralFieldSolver<PSATDTimeStraggeredGridType>
{
public:
PSATDTimeStraggeredT(PSATDTimeStraggeredGrid * grid, FP dt);
// Advances the fields by one step: FFT -> spectral updates -> inverse FFT.
void updateFields();
// Advances B by half a time step in Fourier space.
void updateHalfB();
// Advances E in Fourier space.
void updateE();
// Installs a PML absorbing layer of the given per-axis thickness.
void setPML(int sizePMLx, int sizePMLy, int sizePMLz);
// Changes dt and rebuilds the dt-dependent state (time shifts, PML).
void setTimeStep(FP dt);
// Projects E onto its transverse component (see implementation below).
void convertFieldsPoissonEquation();
// Scratch copies of the previous step's spectral current (see saveJ()).
ScalarField<complexFP> tmpJx, tmpJy, tmpJz;
// Always reports dt as acceptable: no Courant restriction is enforced here.
bool ifCourantConditionSatisfied(FP dt) {
return true;
}
protected:
// Downcasts the stored PML to the spectral PML interface used by this solver.
PmlSpectral<GridTypes::PSATDTimeStraggeredGridType>* getPml() {
return (PmlSpectral<GridTypes::PSATDTimeStraggeredGridType>*)pml.get();
}
// Snapshots the spectral current into tmpJ* for the next half-B update.
void saveJ();
// Element-wise copy of J into tmpJ.
void assignJ(ScalarField<complexFP>& J, ScalarField<complexFP>& tmpJ);
};
// Constructs the staggered solver. The base-class time shifts
// (0.0, 0.5*dt, 0.5*dt) correspond to timeShiftB and timeShiftJ
// (cf. setTimeStep()); the tmpJ* scratch buffers are sized to the
// complex grid storage so assignJ() can copy the full spectral current.
template <bool ifPoisson>
inline PSATDTimeStraggeredT<ifPoisson>::PSATDTimeStraggeredT(PSATDTimeStraggeredGrid* _grid, FP dt) :
SpectralFieldSolver<GridTypes::PSATDTimeStraggeredGridType>(_grid, dt, 0.0, 0.5*dt, 0.5*dt),
tmpJx(complexGrid->sizeStorage),
tmpJy(complexGrid->sizeStorage),
tmpJz(complexGrid->sizeStorage)
{
updateDims();
updateInternalDims();
}
// Installs a time-staggered spectral PML of the given per-axis thickness
// and refreshes the internal (non-PML) domain bounds.
template <bool ifPoisson>
inline void PSATDTimeStraggeredT<ifPoisson>::setPML(int sizePMLx, int sizePMLy, int sizePMLz)
{
    const Int3 pmlSize(sizePMLx, sizePMLy, sizePMLz);
    pml.reset(new PmlPsatdTimeStraggered(this, pmlSize));
    updateInternalDims();
}
// Updates the time step; B and J are sampled half a step away from E in
// this staggered scheme, so both shifts track dt/2. Any existing PML is
// rebuilt (same thickness) so it stays consistent with the new dt.
template <bool ifPoisson>
inline void PSATDTimeStraggeredT<ifPoisson>::setTimeStep(FP dt)
{
    const FP halfStep = 0.5 * dt;
    this->dt = dt;
    this->timeShiftB = halfStep;
    this->timeShiftJ = halfStep;
    if (pml.get() != nullptr)
        pml.reset(new PmlPsatdTimeStraggered(this, pml->sizePML));
}
// Copies the spectral current field J into its scratch buffer tmpJ,
// element by element, in parallel.
template <bool ifPoisson>
inline void PSATDTimeStraggeredT<ifPoisson>::assignJ(ScalarField<complexFP>& J, ScalarField<complexFP>& tmpJ)
{
    const complexFP* const src = J.getData();
    complexFP* const dst = tmpJ.getData();
    const int count = J.getSize().volume();
    OMP_FOR()
    for (int idx = 0; idx < count; idx++)
        dst[idx] = src[idx];
}
// Snapshots the three spectral current components into tmpJx/tmpJy/tmpJz
// so the next updateHalfB() can form the (J - prevJ) difference term.
template <bool ifPoisson>
inline void PSATDTimeStraggeredT<ifPoisson>::saveJ()
{
assignJ(complexGrid->Jx, tmpJx);
assignJ(complexGrid->Jy, tmpJy);
assignJ(complexGrid->Jz, tmpJz);
}
// Advances the fields by one time step. The sequence is significant:
// half-B, E, half-B, with the PML split-field updates interleaved before
// each spectral update, and the current snapshotted (saveJ) before
// leaving Fourier space. Do not reorder.
template <bool ifPoisson>
inline void PSATDTimeStraggeredT<ifPoisson>::updateFields()
{
doFourierTransform(fourier_transform::Direction::RtoC);
if (pml.get()) getPml()->updateBSplit();
updateHalfB();
if (pml.get()) getPml()->updateESplit();
updateE();
if (pml.get()) getPml()->updateBSplit();
updateHalfB();
saveJ();
doFourierTransform(fourier_transform::Direction::CtoR);
if (pml.get()) getPml()->doSecondStep();
globalTime += dt;
}
// Projects E onto its transverse (divergence-free) component in Fourier
// space: E <- E - k_hat * (k_hat . E). The k = 0 mode has no
// longitudinal part and is skipped. Round-trips through the FFT, so the
// real-space fields are updated in place.
// Fix: removed the local `double dt = this->dt * 0.5;` — it was never
// used in this routine and shadowed the member dt, inviting confusion.
template <bool ifPoisson>
inline void PSATDTimeStraggeredT<ifPoisson>::convertFieldsPoissonEquation() {
    doFourierTransform(fourier_transform::Direction::RtoC);
    const Int3 begin = updateComplexBAreaBegin;
    const Int3 end = updateComplexBAreaEnd;
    OMP_FOR_COLLAPSE()
    for (int i = begin.x; i < end.x; i++)
    for (int j = begin.y; j < end.y; j++)
    {
        //#pragma omp simd
        for (int k = begin.z; k < end.z; k++)
        {
            FP3 K = getWaveVector(Int3(i, j, k));
            FP normK = K.norm();
            if (normK == 0) {
                continue;  // zero mode: nothing to project out
            }
            K = K / normK;  // unit wave vector
            ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
            // Longitudinal component of E along k.
            ComplexFP3 El = (ComplexFP3)K * dot((ComplexFP3)K, E);
            complexGrid->Ex(i, j, k) -= El.x;
            complexGrid->Ey(i, j, k) -= El.y;
            complexGrid->Ez(i, j, k) -= El.z;
        }
    }
    doFourierTransform(fourier_transform::Direction::CtoR);
}
// Advances B by half a time step in Fourier space using the analytical
// PSATD update. Consumes the current spectral E and J plus the previous
// step's current snapshot (tmpJ*, filled by saveJ()).
template <bool ifPoisson>
inline void PSATDTimeStraggeredT<ifPoisson>::updateHalfB()
{
const Int3 begin = updateComplexBAreaBegin;
const Int3 end = updateComplexBAreaEnd;
double dt = 0.5 * this->dt; // local half step; intentionally shadows the member dt
OMP_FOR_COLLAPSE()
for (int i = begin.x; i < end.x; i++)
for (int j = begin.y; j < end.y; j++)
{
//#pragma omp simd
for (int k = begin.z; k < end.z; k++)
{
FP3 K = getWaveVector(Int3(i, j, k));
FP normK = K.norm();
// k = 0 mode: no rotation term, B is left unchanged.
if (normK == 0) {
continue;
}
K = K / normK; // unit wave vector
ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k)),
prevJ(tmpJx(i, j, k), tmpJy(i, j, k), tmpJz(i, j, k));
ComplexFP3 crossKE = cross((ComplexFP3)K, E);
ComplexFP3 crossKJ = cross((ComplexFP3)K, J - prevJ);
// Phase factors for a quarter step (dt here is already dt/2).
FP S = sin(normK*constants::c*dt*0.5), C = cos(normK*constants::c*dt*0.5);
complexFP coeff1 = 2 * complexFP::i()*S, coeff2 = complexFP::i() * ((1 - C) / (normK*constants::c));
complexGrid->Bx(i, j, k) += -coeff1 * crossKE.x + coeff2 * crossKJ.x;
complexGrid->By(i, j, k) += -coeff1 * crossKE.y + coeff2 * crossKJ.y;
complexGrid->Bz(i, j, k) += -coeff1 * crossKE.z + coeff2 * crossKJ.z;
}
}
}
// Advances E in Fourier space using the analytical PSATD update. Note
// that, unlike updateHalfB(), this routine uses the member dt directly
// (no half-step shadowing).
template <bool ifPoisson>
inline void PSATDTimeStraggeredT<ifPoisson>::updateE()
{
const Int3 begin = updateComplexEAreaBegin;
const Int3 end = updateComplexEAreaEnd;
OMP_FOR_COLLAPSE()
for (int i = begin.x; i < end.x; i++)
for (int j = begin.y; j < end.y; j++)
{
//#pragma omp simd
for (int k = begin.z; k < end.z; k++)
{
FP3 K = getWaveVector(Int3(i, j, k));
FP normK = K.norm();
// k = 0 mode: only the current term contributes.
if (normK == 0) {
complexGrid->Ex(i, j, k) += dt * complexGrid->Jx(i, j, k);
complexGrid->Ey(i, j, k) += dt * complexGrid->Jy(i, j, k);
complexGrid->Ez(i, j, k) += dt * complexGrid->Jz(i, j, k);
continue;
}
K = K / normK; // unit wave vector
ComplexFP3 B(complexGrid->Bx(i, j, k), complexGrid->By(i, j, k), complexGrid->Bz(i, j, k));
ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k));
ComplexFP3 crossKB = cross((ComplexFP3)K, B);
// Longitudinal component of J along k.
ComplexFP3 Jl = (ComplexFP3)K * dot((ComplexFP3)K, J);
FP S = sin(normK*constants::c*dt*0.5);
complexFP coeff1 = 2 * complexFP::i()*S, coeff2 = 2 * S / (normK*constants::c),
coeff3 = coeff2 - dt;
complexGrid->Ex(i, j, k) += coeff1 * crossKB.x - coeff2 * J.x + coeff3 * Jl.x;
complexGrid->Ey(i, j, k) += coeff1 * crossKB.y - coeff2 * J.y + coeff3 * Jl.y;
complexGrid->Ez(i, j, k) += coeff1 * crossKB.z - coeff2 * J.z + coeff3 * Jl.z;
}
}
}
// Poisson specialization: provides k \cdot E = 0 always (k \cdot J = 0 too).
// Same update as the primary template, but additionally subtracts the
// longitudinal part of E (El) so E stays transverse every step.
template <>
inline void PSATDTimeStraggeredT<true>::updateE()
{
const Int3 begin = updateComplexEAreaBegin;
const Int3 end = updateComplexEAreaEnd;
OMP_FOR_COLLAPSE()
for (int i = begin.x; i < end.x; i++)
for (int j = begin.y; j < end.y; j++)
{
//#pragma omp simd
for (int k = begin.z; k < end.z; k++)
{
FP3 K = getWaveVector(Int3(i, j, k));
FP normK = K.norm();
// k = 0 mode: only the current term contributes.
if (normK == 0) {
complexGrid->Ex(i, j, k) += dt * complexGrid->Jx(i, j, k);
complexGrid->Ey(i, j, k) += dt * complexGrid->Jy(i, j, k);
complexGrid->Ez(i, j, k) += dt * complexGrid->Jz(i, j, k);
continue;
}
K = K / normK; // unit wave vector
ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
ComplexFP3 B(complexGrid->Bx(i, j, k), complexGrid->By(i, j, k), complexGrid->Bz(i, j, k));
ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k));
ComplexFP3 crossKB = cross((ComplexFP3)K, B);
// Longitudinal components of E and J along k.
ComplexFP3 El = (ComplexFP3)K * dot((ComplexFP3)K, E);
ComplexFP3 Jl = (ComplexFP3)K * dot((ComplexFP3)K, J);
FP S = sin(normK*constants::c*dt*0.5);
complexFP coeff1 = 2 * complexFP::i()*S, coeff2 = 2 * S / (normK*constants::c),
coeff3 = coeff2 - dt;
complexGrid->Ex(i, j, k) += -El.x + coeff1 * crossKB.x - coeff2 * (J.x - Jl.x);
complexGrid->Ey(i, j, k) += -El.y + coeff1 * crossKB.y - coeff2 * (J.y - Jl.y);
complexGrid->Ez(i, j, k) += -El.z + coeff1 * crossKB.z - coeff2 * (J.z - Jl.z);
}
}
}
// Non-staggered PSATD solver: E and B are both advanced by updateEB(),
// which updateFields() invokes twice per step (each pass uses dt/2, see
// the local dt in updateEB()). The ifPoisson flag selects an updateEB()
// specialization that keeps E transverse (k . E = 0).
template <bool ifPoisson>
class PSATDT : public SpectralFieldSolver<PSATDGridType>
{
public:
PSATDT(PSATDGrid* grid, FP dt);
// Advances the fields by one step: FFT -> two updateEB() passes -> inverse FFT.
void updateFields();
// Advances E and B together by half a time step in Fourier space.
virtual void updateEB();
// Installs a PML absorbing layer of the given per-axis thickness.
void setPML(int sizePMLx, int sizePMLy, int sizePMLz);
// Changes dt and rebuilds the dt-dependent state (J shift, PML).
void setTimeStep(FP dt);
// Projects E onto its transverse component (see implementation below).
void convertFieldsPoissonEquation();
// Always reports dt as acceptable: no Courant restriction is enforced here.
bool ifCourantConditionSatisfied(FP dt) {
return true;
}
private:
// Downcasts the stored PML to the spectral PML interface used by this solver.
PmlSpectralTimeStraggered<GridTypes::PSATDGridType>* getPml() {
return (PmlSpectralTimeStraggered<GridTypes::PSATDGridType>*)pml.get();
}
};
// Constructs the solver. The base-class time shifts (0.0, 0.0, 0.5*dt)
// leave B unshifted and sample J half a step from the fields
// (cf. setTimeStep(), which maintains timeShiftJ = 0.5*dt).
template <bool ifPoisson>
inline PSATDT<ifPoisson>::PSATDT(PSATDGrid* _grid, FP dt) :
SpectralFieldSolver<GridTypes::PSATDGridType>(_grid, dt, 0.0, 0.0, 0.5*dt)
{
updateDims();
updateInternalDims();
}
// Installs a spectral PML of the given per-axis thickness and refreshes
// the internal (non-PML) domain bounds.
template <bool ifPoisson>
inline void PSATDT<ifPoisson>::setPML(int sizePMLx, int sizePMLy, int sizePMLz)
{
    const Int3 pmlSize(sizePMLx, sizePMLy, sizePMLz);
    pml.reset(new PmlPsatd(this, pmlSize));
    updateInternalDims();
}
// Updates the time step; J stays sampled half a step from the fields.
// Any existing PML is rebuilt (same thickness) so it stays consistent
// with the new dt.
template <bool ifPoisson>
inline void PSATDT<ifPoisson>::setTimeStep(FP dt)
{
    this->dt = dt;
    this->timeShiftJ = 0.5 * dt;
    if (pml.get() != nullptr)
        pml.reset(new PmlPsatd(this, pml->sizePML));
}
// Advances the fields by one time step. The sequence is significant:
// FFT to spectral space, then two updateEB() passes (each advancing by
// dt/2) with the PML split-field updates interleaved, then the inverse
// FFT and the PML second step. Do not reorder.
// Cleanup: removed the large block of commented-out <chrono> profiling
// scaffolding that obscured the order-critical call sequence.
template <bool ifPoisson>
inline void PSATDT<ifPoisson>::updateFields() {
    doFourierTransform(fourier_transform::Direction::RtoC);
    if (pml.get()) getPml()->updateBSplit();
    updateEB();
    if (pml.get()) getPml()->updateESplit();
    updateEB();
    if (pml.get()) getPml()->updateBSplit();
    doFourierTransform(fourier_transform::Direction::CtoR);
    if (pml.get()) getPml()->doSecondStep();
    globalTime += dt;
}
// Projects E onto its transverse (divergence-free) component in Fourier
// space: E <- E - k_hat * (k_hat . E). The k = 0 mode has no
// longitudinal part and is skipped. Round-trips through the FFT, so the
// real-space fields are updated in place.
// Fix: removed the local `double dt = this->dt * 0.5;` — it was never
// used in this routine and shadowed the member dt, inviting confusion.
template <bool ifPoisson>
inline void PSATDT<ifPoisson>::convertFieldsPoissonEquation() {
    doFourierTransform(fourier_transform::Direction::RtoC);
    const Int3 begin = updateComplexBAreaBegin;
    const Int3 end = updateComplexBAreaEnd;
    OMP_FOR_COLLAPSE()
    for (int i = begin.x; i < end.x; i++)
    for (int j = begin.y; j < end.y; j++)
    {
        //#pragma omp simd
        for (int k = begin.z; k < end.z; k++)
        {
            FP3 K = getWaveVector(Int3(i, j, k));
            FP normK = K.norm();
            if (normK == 0) {
                continue;  // zero mode: nothing to project out
            }
            K = K / normK;  // unit wave vector
            ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
            // Longitudinal component of E along k.
            ComplexFP3 El = (ComplexFP3)K * dot((ComplexFP3)K, E);
            complexGrid->Ex(i, j, k) -= El.x;
            complexGrid->Ey(i, j, k) -= El.y;
            complexGrid->Ez(i, j, k) -= El.z;
        }
    }
    doFourierTransform(fourier_transform::Direction::CtoR);
}
// Advances E and B together by half a time step in Fourier space using
// the analytical PSATD update.
template <bool ifPoisson>
inline void PSATDT<ifPoisson>::updateEB()
{
const Int3 begin = updateComplexBAreaBegin;
const Int3 end = updateComplexBAreaEnd;
double dt = 0.5 * this->dt; // local half step; intentionally shadows the member dt
OMP_FOR_COLLAPSE()
for (int i = begin.x; i < end.x; i++)
for (int j = begin.y; j < end.y; j++)
{
//#pragma omp simd
for (int k = begin.z; k < end.z; k++)
{
FP3 K = getWaveVector(Int3(i, j, k));
FP normK = K.norm();
ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
ComplexFP3 B(complexGrid->Bx(i, j, k), complexGrid->By(i, j, k), complexGrid->Bz(i, j, k));
ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k));
// Scale the current by 4*pi (NOTE(review): presumably Gaussian units — confirm).
J = complexFP(4 * constants::pi) * J;
// k = 0 mode: only the current term contributes to E; B is unchanged.
if (normK == 0) {
complexGrid->Ex(i, j, k) += -J.x;
complexGrid->Ey(i, j, k) += -J.y;
complexGrid->Ez(i, j, k) += -J.z;
continue;
}
K = K / normK; // unit wave vector
ComplexFP3 kEcross = cross((ComplexFP3)K, E), kBcross = cross((ComplexFP3)K, B),
kJcross = cross((ComplexFP3)K, J);
// Longitudinal components of J and E along k.
ComplexFP3 Jl = (ComplexFP3)K * dot((ComplexFP3)K, J), El = (ComplexFP3)K * dot((ComplexFP3)K, E);
FP S = sin(normK*constants::c*dt), C = cos(normK*constants::c*dt);
complexFP coef1E = S * complexFP::i(), coef2E = -S / (normK*constants::c),
coef3E = S / (normK*constants::c) - dt;
complexGrid->Ex(i, j, k) = C * E.x + coef1E * kBcross.x + (1 - C) * El.x + coef2E * J.x + coef3E * Jl.x;
complexGrid->Ey(i, j, k) = C * E.y + coef1E * kBcross.y + (1 - C) * El.y + coef2E * J.y + coef3E * Jl.y;
complexGrid->Ez(i, j, k) = C * E.z + coef1E * kBcross.z + (1 - C) * El.z + coef2E * J.z + coef3E * Jl.z;
// B update uses the pre-update E stored above, not the new E.
complexFP coef1B = -S * complexFP::i(), coef2B = ((1 - C) / (normK*constants::c))*complexFP::i();
complexGrid->Bx(i, j, k) = C * B.x + coef1B * kEcross.x + coef2B * kJcross.x;
complexGrid->By(i, j, k) = C * B.y + coef1B * kEcross.y + coef2B * kJcross.y;
complexGrid->Bz(i, j, k) = C * B.z + coef1B * kEcross.z + coef2B * kJcross.z;
}
}
}
// Poisson specialization: provides k \cdot E = 0 always (k \cdot J = 0 too).
// Same half-step update as the primary template, but E's longitudinal
// part (El) and J's (Jl) are removed so E stays transverse.
template <>
inline void PSATDT<true>::updateEB()
{
const Int3 begin = updateComplexBAreaBegin;
const Int3 end = updateComplexBAreaEnd;
double dt = 0.5 * this->dt; // local half step; intentionally shadows the member dt
OMP_FOR_COLLAPSE()
for (int i = begin.x; i < end.x; i++)
for (int j = begin.y; j < end.y; j++)
{
//#pragma omp simd
for (int k = begin.z; k < end.z; k++)
{
FP3 K = getWaveVector(Int3(i, j, k));
FP normK = K.norm();
ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
ComplexFP3 B(complexGrid->Bx(i, j, k), complexGrid->By(i, j, k), complexGrid->Bz(i, j, k));
ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k));
// Scale the current by 4*pi (NOTE(review): presumably Gaussian units — confirm).
J = complexFP(4 * constants::pi) * J;
// k = 0 mode: only the current term contributes to E; B is unchanged.
if (normK == 0) {
complexGrid->Ex(i, j, k) += -J.x;
complexGrid->Ey(i, j, k) += -J.y;
complexGrid->Ez(i, j, k) += -J.z;
continue;
}
K = K / normK; // unit wave vector
ComplexFP3 kEcross = cross((ComplexFP3)K, E), kBcross = cross((ComplexFP3)K, B),
kJcross = cross((ComplexFP3)K, J);
// Longitudinal components of J and E along k.
ComplexFP3 Jl = (ComplexFP3)K * dot((ComplexFP3)K, J), El = (ComplexFP3)K * dot((ComplexFP3)K, E);
FP S = sin(normK*constants::c*dt), C = cos(normK*constants::c*dt);
complexFP coef1E = S * complexFP::i(), coef2E = -S / (normK*constants::c),
coef3E = S / (normK*constants::c) - dt;
complexGrid->Ex(i, j, k) = C * (E.x - El.x) + coef1E * kBcross.x + coef2E * (J.x - Jl.x);
complexGrid->Ey(i, j, k) = C * (E.y - El.y) + coef1E * kBcross.y + coef2E * (J.y - Jl.y);
complexGrid->Ez(i, j, k) = C * (E.z - El.z) + coef1E * kBcross.z + coef2E * (J.z - Jl.z);
// B update uses the pre-update E stored above, not the new E.
complexFP coef1B = -S * complexFP::i(), coef2B = ((1 - C) / (normK*constants::c))*complexFP::i();
complexGrid->Bx(i, j, k) = C * B.x + coef1B * kEcross.x + coef2B * kJcross.x;
complexGrid->By(i, j, k) = C * B.y + coef1B * kEcross.y + coef2B * kJcross.y;
complexGrid->Bz(i, j, k) = C * B.z + coef1B * kEcross.z + coef2B * kJcross.z;
}
}
}
// Convenience aliases: the "Poisson" variants use the specializations
// that keep E transverse (k . E = 0) on every update.
typedef PSATDT<true> PSATDPoisson;
typedef PSATDT<false> PSATD;
typedef PSATDTimeStraggeredT<true> PSATDTimeStraggeredPoisson;
typedef PSATDTimeStraggeredT<false> PSATDTimeStraggered;
}
|
implicit_task_data.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// This test checks that values stored in task_data in a barrier_begin event
// are still present in the corresponding barrier_end event.
// Therefore, callback implementations different from the ones in callback.h are neccessary.
// This is a test for an issue reported in
// https://github.com/OpenMPToolsInterface/LLVM-openmp/issues/39
#define _BSD_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <omp.h>
#include <omp-tools.h>
static const char* ompt_thread_t_values[] = {
NULL,
"ompt_thread_initial",
"ompt_thread_worker",
"ompt_thread_other"
};
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_thread_data_t ompt_get_thread_data;
// Runs a 4-thread parallel region; the FileCheck directives below verify
// that the task_id seen at barrier_begin is the same one reported at
// barrier_end (the CHECK comment lines are part of the test and must not
// be edited).
int main()
{
#pragma omp parallel num_threads(4)
{
#pragma omp master
{
// Delay the master so the workers reach the implicit barrier first.
sleep(1);
}
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// master thread implicit barrier at parallel end
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[MRA:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[MRA]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[MRA]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[MRA]]
// worker thread implicit barrier at parallel end
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[WRA:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[WRA]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
return 0;
}
/* Tags each newly started thread with a unique id and reports the
   thread-begin event. Also flags a pre-populated thread_data pointer,
   which would indicate stale state. */
static void on_ompt_callback_thread_begin(ompt_thread_t thread_type,
                                          ompt_data_t *thread_data) {
  if (thread_data->ptr != NULL) {
    printf("%s\n", "0: thread_data initially not null");
  }
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, ompt_thread_t_values[thread_type],
         thread_type, thread_data->value);
}
/* Sync-region callback: stores a fresh id in task_data at scope begin so
   the matching scope-end event can be checked against the same id, and
   prints begin/end lines for implicit barriers only. */
static void on_ompt_callback_sync_region(ompt_sync_region_t kind,
                                         ompt_scope_endpoint_t endpoint,
                                         ompt_data_t *parallel_data,
                                         ompt_data_t *task_data,
                                         const void *codeptr_ra) {
  if (endpoint == ompt_scope_begin) {
    task_data->value = ompt_get_unique_id();
    if (kind == ompt_sync_region_barrier_implicit) {
      printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
             ompt_get_thread_data()->value, parallel_data->value,
             task_data->value, codeptr_ra);
    }
  } else if (endpoint == ompt_scope_end) {
    /* parallel_data may already be gone at region end. */
    if (kind == ompt_sync_region_barrier_implicit) {
      printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
             ompt_get_thread_data()->value,
             (parallel_data) ? parallel_data->value : 0,
             task_data->value, codeptr_ra);
    }
  }
}
/* Wait-phase counterpart of the sync-region callback. It deliberately
   does NOT overwrite task_data at begin: the test relies on the value
   set by on_ompt_callback_sync_region surviving into these events. */
static void on_ompt_callback_sync_region_wait(ompt_sync_region_t kind,
                                              ompt_scope_endpoint_t endpoint,
                                              ompt_data_t *parallel_data,
                                              ompt_data_t *task_data,
                                              const void *codeptr_ra) {
  if (endpoint == ompt_scope_begin) {
    if (kind == ompt_sync_region_barrier_implicit) {
      printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
             ompt_get_thread_data()->value, parallel_data->value,
             task_data->value, codeptr_ra);
    }
  } else if (endpoint == ompt_scope_end) {
    /* parallel_data may already be gone at region end. */
    if (kind == ompt_sync_region_barrier_implicit) {
      printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
             ompt_get_thread_data()->value,
             (parallel_data) ? parallel_data->value : 0,
             task_data->value, codeptr_ra);
    }
  }
}
// Registers callback `name` using function-pointer type `type`; prints a
// diagnostic (matched by the CHECK-NOT lines in main) if the runtime
// refuses it. register_callback(name) is the common case where the type
// is simply name##_t.
#define register_callback_t(name, type) \
do{ \
type f_##name = &on_##name; \
if (ompt_set_callback(name, (ompt_callback_t)f_##name) == \
ompt_set_never) \
printf("0: Could not register callback '" #name "'\n"); \
}while(0)
#define register_callback(name) register_callback_t(name, name##_t)
// Tool initializer: looks up the runtime entry points this tool uses and
// registers the callbacks under test. Returns 1 to keep the tool active.
int ompt_initialize(
ompt_function_lookup_t lookup,
ompt_data_t *tool_data)
{
ompt_set_callback_t ompt_set_callback;
ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
register_callback(ompt_callback_sync_region);
// sync_region_wait callbacks share the ompt_callback_sync_region_t
// signature (there is no ompt_callback_sync_region_wait_t), so the
// explicit _t variant of the macro is required here.
register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
register_callback(ompt_callback_thread_begin);
printf("0: NULL_POINTER=%p\n", (void*)NULL);
return 1; //success
}
// Tool finalizer: announces runtime shutdown (tool_data is unused).
void ompt_finalize(ompt_data_t *tool_data)
{
printf("0: ompt_event_runtime_shutdown\n");
}
/* OMPT tool entry point: hands the runtime our initialize/finalize pair.
   The result must outlive this call, hence the static storage. */
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  static ompt_start_tool_result_t result = {&ompt_initialize, &ompt_finalize, 0};
  return &result;
}
|
matrix.h | /*
Copyright 2016 Waizung Taam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
- 2016-08-17
- ======== tensor::Matrix ========
- namespace tensor
- class Matrix
- Declaration
- type
- constructors, etc
- shape
- iterators
- accessors
- modifiers
- arithmetic
- comparisons
- io
- helper functions
- private data member
- Implementation
- Same order as declared
- namespace internal before
- transpose
- arithmetic
- comparisons
- class MatrixException
*/
#ifndef TENSOR_MATRIX_H_
#define TENSOR_MATRIX_H_
#include "vector.h"
#include <algorithm>
#include <cmath>
#include <exception>
#include <fstream>
#include <functional>
#include <iomanip>
#include <iostream>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <omp.h>
#include <x86intrin.h>
namespace tensor {
class MatrixException;
template <typename Tp>
class Matrix {
public:
// ======== Types ========
typedef Tp value_type;
typedef typename std::vector<Vector<Tp>>::size_type size_type;
typedef typename std::make_signed<size_type>::type index_type;
typedef typename std::vector<Vector<Tp>>::difference_type difference_type;
typedef size_type dimension_type;
typedef typename std::vector<Vector<Tp>>::iterator iterator;
typedef typename std::vector<Vector<Tp>>::const_iterator const_iterator;
typedef typename std::vector<Vector<Tp>>::reverse_iterator reverse_iterator;
typedef typename std::vector<Vector<Tp>>::const_reverse_iterator
const const_reverse_iterator;
// ======== Constructors, etc ========
Matrix();
Matrix(const size_type& num_rows, const size_type& num_cols);
Matrix(const size_type& num_rows, const size_type& num_cols,
const Tp& val_init);
template <typename OtherT>
Matrix(const size_type& num_rows, const size_type& num_cols,
const OtherT& val_cast);
Matrix(const Matrix& mat_init);
template <typename OtherT>
Matrix(const Matrix<OtherT>& mat_cast);
Matrix(Matrix&& mat_init);
template <typename OtherT>
Matrix(Matrix<OtherT>&& mat_cat);
/*explicit */Matrix(const std::vector<std::vector<Tp>>& stdvec_init);
template <typename OtherT>
/*explicit */Matrix(const std::vector<std::vector<OtherT>>& stdvec_cast);
/*explicit */Matrix(const std::initializer_list<
std::initializer_list<Tp>>& il_init);
template <typename OtherT>
/*explicit */Matrix(const std::initializer_list<
std::initializer_list<OtherT>>& il_cast);
explicit Matrix(const Vector<Tp>& vec_init);
template <typename OtherT>
explicit Matrix(const Vector<OtherT>& vec_cast);
template <typename ParamT1, typename ParamT2>
Matrix(const size_type& num_rows, const size_type& num_cols,
Random::Distribution dis, const ParamT1& param1,
const ParamT2& param2);
template <typename ParamT>
Matrix(const size_type& num_rows, const size_type& num_cols,
Random::Distribution dis, const ParamT& param);
Matrix& operator=(const Tp& val_assign);
template <typename OtherT>
Matrix& operator=(const OtherT& val_cast);
Matrix& operator=(const Matrix& mat_copy);
template <typename OtherT>
Matrix& operator=(const Matrix<OtherT>& mat_cast);
Matrix& operator=(Matrix&& mat_move);
template <typename OtherT>
Matrix& operator=(Matrix<OtherT>&& mat_cast);
Matrix& operator=(const std::vector<std::vector<Tp>>& stdvec_assign);
template <typename OtherT>
Matrix& operator=(const std::vector<std::vector<OtherT>>& stdvec_cast);
Matrix& operator=(const std::initializer_list<
std::initializer_list<Tp>>& il_assign);
template <typename OtherT>
Matrix& operator=(const std::initializer_list<
std::initializer_list<OtherT>>& il_cast);
Matrix& operator=(const Vector<Tp>& vec_assign);
template <typename OtherT>
Matrix& operator=(const Vector<OtherT>& vec_cast);
~Matrix();
// ======== Shape ========
Vector<size_type> shape() const;
void clear();
bool empty();
// ======== Iterators ========
iterator begin();
iterator end();
const_iterator begin() const;
const_iterator end() const;
const_iterator cbegin() const;
const_iterator cend() const;
reverse_iterator rbegin();
reverse_iterator rend();
const_reverse_iterator rbegin() const;
const_reverse_iterator rend() const;
const_reverse_iterator crbegin() const;
const_reverse_iterator crend() const;
// ======== Accessors ========
Vector<Tp>& operator[](const index_type& index);
const Vector<Tp>& operator[](const index_type& index) const;
Matrix operator()(const index_type& idx_row) const;
Matrix operator()(const const_iterator& cit_row) const;
Matrix operator()(const index_type& idx_row_begin,
const index_type& idx_row_end) const;
Matrix operator()(const const_iterator& cit_row_begin,
const const_iterator& cit_row_end) const;
Matrix operator()(const index_type& idx_row_begin,
const index_type& idx_row_end,
const index_type& idx_col_begin,
const index_type& idx_col_end) const;
// ======== Modifiers ========
Matrix insert(const Vector<Tp>& vec_insert, const dimension_type& dim_insert,
const index_type& idx_insert) const;
Matrix insert(const Matrix& mat_insert, const dimension_type& dim_insert,
const index_type& idx_insert) const;
Matrix remove(const dimension_type& dim_remove, const
index_type& idx_remove) const;
Matrix remove(const dimension_type& dim_remove, const index_type& idx_begin,
const index_type& idx_end) const;
Matrix replace(const Vector<Tp>& vec_replace,
const dimension_type& dim_replace,
const index_type& idx_row_begin,
const index_type& idx_col_begin) const;
Matrix replace(const Matrix& mat_replace, const index_type& idx_row_begin,
const index_type& idx_col_begin) const;
Matrix transpose() const;
Matrix T() const;
Matrix reshape(const size_type& num_rows, const size_type& num_cols) const;
Matrix shuffle() const;
// ======== Arithmetic ========
template <typename AriT>
friend Matrix<AriT> operator+(const Matrix<AriT>& mat_lhs,
const Matrix<AriT>& mat_rhs);
template <typename AriT>
friend Matrix<AriT> operator+(const Matrix<AriT>& mat_lhs,
const AriT& val_rhs);
template <typename AriT>
friend Matrix<AriT> operator+(const AriT& val_lhs,
const Matrix<AriT>& mat_rhs);
template <typename AriT>
friend Matrix<AriT> operator-(const Matrix<AriT>& mat_lhs,
const Matrix<AriT>& mat_rhs);
template <typename AriT>
friend Matrix<AriT> operator-(const Matrix<AriT>& mat_lhs,
const AriT& val_rhs);
template <typename AriT>
friend Matrix<AriT> operator-(const AriT& val_lhs,
const Matrix<AriT>& mat_rhs);
template <typename AriT>
friend Matrix<AriT> operator*(const Matrix<AriT>& mat_lhs,
const Matrix<AriT>& mat_rhs);
template <typename AriT>
friend Matrix<AriT> operator*(const Matrix<AriT>& mat_lhs,
const AriT& val_rhs);
template <typename AriT>
friend Matrix<AriT> operator*(const AriT& val_lhs,
const Matrix<AriT>& mat_rhs);
template <typename AriT>
friend Matrix<AriT> operator/(const Matrix<AriT>& mat_lhs,
const Matrix<AriT>& mat_rhs);
template <typename AriT>
friend Matrix<AriT> operator/(const Matrix<AriT>& mat_lhs,
const AriT& val_rhs);
template <typename AriT>
friend Matrix<AriT> operator/(const AriT& val_lhs,
const Matrix<AriT>& mat_rhs);
void operator+=(const Matrix& mat_rhs);
void operator+=(const Tp& val_rhs);
void operator-=(const Matrix& mat_rhs);
void operator-=(const Tp& val_rhs);
void operator*=(const Matrix& mat_rhs);
void operator*=(const Tp& val_rhs);
void operator/=(const Matrix& mat_rhs);
void operator/=(const Tp& val_rhs);
Matrix times(const Matrix& mat_rhs) const;
Tp sum() const;
Vector<Tp> sum(const dimension_type& dim_sum) const;
// ======== Comparisons ========
template <typename CmpT>
friend Matrix<CmpT> operator==(const Matrix<CmpT>& mat_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator==(const Matrix<CmpT>& mat_lhs,
const CmpT& val_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator==(const CmpT& val_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator!=(const Matrix<CmpT>& mat_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator!=(const Matrix<CmpT>& mat_lhs,
const CmpT& val_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator!=(const CmpT& val_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator<(const Matrix<CmpT>& mat_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator<(const Matrix<CmpT>& mat_lhs,
const CmpT& val_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator<(const CmpT& val_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator<=(const Matrix<CmpT>& mat_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator<=(const Matrix<CmpT>& mat_lhs,
const CmpT& val_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator<=(const CmpT& val_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator>(const Matrix<CmpT>& mat_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator>(const Matrix<CmpT>& mat_lhs,
const CmpT& val_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator>(const CmpT& val_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator>=(const Matrix<CmpT>& mat_lhs,
const Matrix<CmpT>& mat_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator>=(const Matrix<CmpT>& mat_lhs,
const CmpT& val_rhs);
template <typename CmpT>
friend Matrix<CmpT> operator>=(const CmpT& val_lhs,
const Matrix<CmpT>& mat_rhs);
bool equal(const Matrix& mat_rhs, std::size_t ulp = 1);
bool nequal(const Matrix& mat_rhs, std::size_t ulp = 1);
Tp max() const;
Vector<Tp> max(const dimension_type& dim_max) const;
Tp min() const;
Vector<Tp> min(const dimension_type& dim_min) const;
// ======== IO ========
template <typename MatT, typename CharT, typename Traits>
friend std::basic_ostream<CharT, Traits>& operator<<(
std::basic_ostream<CharT, Traits>& os, const Matrix<MatT>& mat);
template <typename MatT, typename CharT, typename Traits>
friend std::basic_istream<CharT, Traits>& operator>>(
std::basic_istream<CharT, Traits>& is, Matrix<MatT>& mat);
private:
// ======== Helper Functions ========
static index_type to_positive_index_(const size_type& size,
const index_type& index);
static void exclusive_range_check_(const size_type& size,
const index_type& index);
static void exclusive_range_check_(const iterator& it_begin,
const iterator& it_end,
const iterator& it);
static void exclusive_range_check_(const const_iterator& cit_begin,
const const_iterator& cit_end,
const const_iterator& cit);
static void inclusive_range_check_(const size_type& size,
const index_type& index);
static void inclusive_range_check_(const iterator& it_begin,
const iterator& it_end,
const iterator& it);
static void inclusive_range_check_(const const_iterator& cit_begin,
const const_iterator& cit_end,
const const_iterator& cit);
static void shape_consistence_check_(const Vector<size_type>& shape_lhs,
const Vector<size_type>& shape_rhs);
static void index_order_check_(const size_type& size,
const index_type& idx_begin,
const index_type& idx_end);
index_type to_positive_index_(const index_type& index) const;
void exclusive_range_check_(const index_type& index) const;
void exclusive_range_check_(const iterator& it);
void exclusive_range_check_(const const_iterator& cit) const;
void inclusive_range_check_(const index_type& index) const;
void inclusive_range_check_(const iterator& it);
void inclusive_range_check_(const const_iterator& cit) const;
void index_order_check_(const index_type& idx_begin,
const index_type& idx_end) const;
void iterator_order_check_(const iterator& it_begin,
const iterator& it_end);
void const_iterator_order_check_(const const_iterator& cit_begin,
const const_iterator& cit_end) const;
// ======== Private Data Member ========
std::vector<Vector<Tp>> mat_;
};
// ======== Constructors, etc ========
// Default: an empty (0 x 0) matrix.
template <typename Tp>
Matrix<Tp>::Matrix() {}
// num_rows x num_cols matrix of value-initialized elements.
template <typename Tp>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols) :
    mat_(std::vector<Vector<Tp>>(num_rows, Vector<Tp>(num_cols))) {}
// num_rows x num_cols matrix with every element set to val_init.
template <typename Tp>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols,
                   const Tp& val_init) :
    mat_(std::vector<Vector<Tp>>(num_rows, Vector<Tp>(num_cols, val_init))) {}
// Same as above, except the fill value is converted from OtherT
// (conversion is delegated to the Vector<Tp> fill constructor).
template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols,
                   const OtherT& val_cast) :
    mat_(std::vector<Vector<Tp>>(num_rows, Vector<Tp>(num_cols, val_cast))) {}
// Copy constructor: deep-copies the row storage.
template <typename Tp>
Matrix<Tp>::Matrix(const Matrix& mat_init) : mat_(mat_init.mat_) {}
// Converting copy: each row is rebuilt as a Vector<Tp> from the OtherT row.
template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const Matrix<OtherT>& mat_cast) {
  mat_.resize(mat_cast.shape()[0]);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(mat_cast[idx_row]);
  }
}
// Move constructor: steals the row storage; mat_init is left empty.
template <typename Tp>
Matrix<Tp>::Matrix(Matrix&& mat_init) : mat_(std::move(mat_init.mat_)) {}
// Converting move: takes ownership of mat_cast's storage via a local cache,
// then converts each row to Vector<Tp> (element conversion still requires a
// per-row rebuild, so only the source matrix's ownership is moved).
template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(Matrix<OtherT>&& mat_cast) {
  mat_.resize(mat_cast.shape()[0]);
  Matrix<OtherT> mat_cache = std::move(mat_cast);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(mat_cache[idx_row]);
  }
}
// Build from a nested std::vector; each inner vector becomes one row.
// NOTE(review): row lengths are not checked here — presumably callers pass
// rectangular input; ragged input would yield rows of differing widths.
template <typename Tp>
Matrix<Tp>::Matrix(const std::vector<std::vector<Tp>>& stdvec_init) {
  mat_.resize(stdvec_init.size());
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(stdvec_init[idx_row]);
  }
}
// Same, with element conversion OtherT -> Tp delegated to Vector<Tp>.
template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const std::vector<std::vector<OtherT>>& stdvec_cast) {
  mat_.resize(stdvec_cast.size());
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(stdvec_cast[idx_row]);
  }
}
// Build from a nested initializer list; each inner list becomes one row.
template <typename Tp>
Matrix<Tp>::Matrix(const std::initializer_list<
                       std::initializer_list<Tp>>& il_init) {
  mat_.resize(il_init.size());
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(*(il_init.begin() + idx_row));
  }
}
// Same, with element conversion OtherT -> Tp.
template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const std::initializer_list<
                       std::initializer_list<OtherT>>& il_cast) {
  mat_.resize(il_cast.size());
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(*(il_cast.begin() + idx_row));
  }
}
// Build an n x 1 column matrix from a Vector: one single-element row per
// vector entry.
template <typename Tp>
Matrix<Tp>::Matrix(const Vector<Tp>& vec_init) {
  mat_.resize(vec_init.shape()[0]);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(1, vec_init[idx_row]);
  }
}
// Same, with element conversion OtherT -> Tp.
template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const Vector<OtherT>& vec_cast) {
  mat_.resize(vec_cast.shape()[0]);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(1, vec_cast[idx_row]);
  }
}
// Random matrix from a two-parameter distribution (e.g. uniform bounds or
// normal mean/stddev); the actual sampling lives in the Vector constructor.
template <typename Tp> template <typename ParamT1, typename ParamT2>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols,
                   Random::Distribution dis, const ParamT1& param1,
                   const ParamT2& param2) {
  mat_.resize(num_rows);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(num_cols, dis, param1, param2);
  }
}
// Random matrix from a single-parameter distribution.
template <typename Tp> template <typename ParamT>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols,
                   Random::Distribution dis, const ParamT& param) {
  mat_.resize(num_rows);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(num_cols, dis, param);
  }
}
// Fill-assign: every element becomes val_assign; the shape is preserved by
// rebuilding a same-shaped matrix filled with the value.
template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const Tp& val_assign) {
  mat_ = Matrix<Tp>(shape()[0], shape()[1], val_assign).mat_;
  return *this;
}
// Fill-assign with conversion OtherT -> Tp.
template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const OtherT& val_cast) {
  mat_ = Matrix<Tp>(shape()[0], shape()[1], val_cast).mat_;
  return *this;
}
// Copy assignment (self-assignment is harmless: vector copy handles it).
template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const Matrix& mat_copy) {
  mat_ = mat_copy.mat_;
  return *this;
}
// Converting copy assignment via the converting constructor.
template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const Matrix<OtherT>& mat_cast) {
  mat_ = Matrix<Tp>(mat_cast).mat_;
  return *this;
}
// Move assignment.
//
// Bug fix: the previous body wrote `Matrix<Tp>(mat_move)`, and a *named*
// rvalue reference is an lvalue, so that expression selected the COPY
// constructor — every row was deep-copied and the move was silently a copy.
// Routing the move through a local temporary both performs the real move and
// keeps self-move-assignment safe (the temporary steals the storage first,
// then hands it back).
template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(Matrix&& mat_move) {
  Matrix<Tp> mat_cache(std::move(mat_move));
  mat_ = std::move(mat_cache.mat_);
  return *this;
}
// Converting move assignment.
//
// Bug fix: the previous body passed the named rvalue `mat_cast` as an
// lvalue, so the converting COPY constructor ran and the source was deep-
// copied row by row. Forwarding with std::move selects the move-converting
// constructor, which takes ownership of the source's storage before the
// per-row OtherT -> Tp conversion.
template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(Matrix<OtherT>&& mat_cast) {
  mat_ = Matrix<Tp>(std::move(mat_cast)).mat_;
  return *this;
}
// Assignment from a nested std::vector; delegates to the matching
// constructor so the row-building logic lives in one place.
template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const std::vector<
                                      std::vector<Tp>>& stdvec_assign) {
  mat_ = Matrix<Tp>(stdvec_assign).mat_;
  return *this;
}
// Same, with element conversion OtherT -> Tp.
template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const std::vector<
                                      std::vector<OtherT>>& stdvec_cast) {
  mat_ = Matrix<Tp>(stdvec_cast).mat_;
  return *this;
}
// Assignment from a nested initializer list.
template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const std::initializer_list<
                                      std::initializer_list<Tp>>& il_assign) {
  mat_ = Matrix<Tp>(il_assign).mat_;
  return *this;
}
// Same, with element conversion OtherT -> Tp.
template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const std::initializer_list<
                                      std::initializer_list<OtherT>>& il_cast) {
  mat_ = Matrix<Tp>(il_cast).mat_;
  return *this;
}
// Assignment from a Vector: becomes an n x 1 column matrix.
template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const Vector<Tp>& vec_assign) {
  mat_ = Matrix(vec_assign).mat_;
  return *this;
}
// Same, with element conversion OtherT -> Tp.
template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const Vector<OtherT>& vec_cast) {
  mat_ = Matrix(vec_cast).mat_;
  return *this;
}
// Destructor: the row vector cleans itself up (Rule of Zero in spirit).
template <typename Tp>
Matrix<Tp>::~Matrix() = default;
// ======== Shape ========
// Returns {rows, cols}; an empty matrix reports {0, 0}. The column count is
// read from the first row, so all rows are assumed equally wide.
template <typename Tp>
Vector<typename Matrix<Tp>::size_type> Matrix<Tp>::shape() const {
  return mat_.empty()
      ? Vector<size_type>({0, 0})
      : Vector<size_type>({mat_.size(), mat_[0].shape()[0]});
}
// Drops every row, leaving a 0 x 0 matrix.
template <typename Tp>
void Matrix<Tp>::clear() { mat_.clear(); }
// True when the matrix holds no rows at all.
template <typename Tp>
bool Matrix<Tp>::empty() { return mat_.empty(); }
// ======== Iterators ========
// All iterators walk the rows (each dereference yields a Vector<Tp>); they
// forward directly to the underlying std::vector of rows.
template <typename Tp> typename
Matrix<Tp>::iterator Matrix<Tp>::begin() { return mat_.begin(); }
template <typename Tp> typename
Matrix<Tp>::iterator Matrix<Tp>::end() { return mat_.end(); }
template <typename Tp> typename
Matrix<Tp>::const_iterator Matrix<Tp>::begin() const { return mat_.cbegin(); }
template <typename Tp> typename
Matrix<Tp>::const_iterator Matrix<Tp>::end() const { return mat_.cend(); }
template <typename Tp> typename
Matrix<Tp>::const_iterator Matrix<Tp>::cbegin() const { return mat_.cbegin(); }
template <typename Tp> typename
Matrix<Tp>::const_iterator Matrix<Tp>::cend() const { return mat_.cend(); }
// Reverse variants iterate rows bottom-up.
template <typename Tp> typename
Matrix<Tp>::reverse_iterator Matrix<Tp>::rbegin() { return mat_.rbegin(); }
template <typename Tp> typename
Matrix<Tp>::reverse_iterator Matrix<Tp>::rend() { return mat_.rend(); }
template <typename Tp> typename
Matrix<Tp>::const_reverse_iterator Matrix<Tp>::rbegin() const {
  return mat_.crbegin();
}
template <typename Tp> typename
Matrix<Tp>::const_reverse_iterator Matrix<Tp>::rend() const {
  return mat_.crend();
}
template <typename Tp> typename
Matrix<Tp>::const_reverse_iterator Matrix<Tp>::crbegin() const {
  return mat_.crbegin();
}
template <typename Tp> typename
Matrix<Tp>::const_reverse_iterator Matrix<Tp>::crend() const {
  return mat_.crend();
}
// ======== Accessors ========
// Row access; negative indices count from the end (converted by
// to_positive_index_) and out-of-range indices throw via the range check.
template <typename Tp>
Vector<Tp>& Matrix<Tp>::operator[](const index_type& index) {
  exclusive_range_check_(index);
  return mat_.at(to_positive_index_(index));
}
template <typename Tp>
const Vector<Tp>& Matrix<Tp>::operator[](const index_type& index) const {
  exclusive_range_check_(index);
  return mat_.at(to_positive_index_(index));
}
// Single-row slice returned as a 1 x cols matrix (a copy, not a view).
template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const index_type& idx_row) const {
  exclusive_range_check_(idx_row);
  Matrix row_mat(1, shape()[1]);
  row_mat[0] = mat_[to_positive_index_(idx_row)];
  return row_mat;
}
// Single-row slice addressed by iterator.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const const_iterator& cit_row) const {
  exclusive_range_check_(cit_row);
  Matrix row_mat(1, shape()[1]);
  row_mat[0] = *cit_row;
  return row_mat;
}
// Row-range slice [idx_row_begin, idx_row_end): begin must be a valid row,
// end may be one-past-the-end (hence the inclusive check).
template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const index_type& idx_row_begin,
                                  const index_type& idx_row_end) const {
  exclusive_range_check_(idx_row_begin);
  inclusive_range_check_(idx_row_end);
  index_order_check_(idx_row_begin, idx_row_end);
  size_type idx_row_begin_p = to_positive_index_(idx_row_begin);
  size_type idx_row_end_p = to_positive_index_(idx_row_end);
  Matrix mat_partial(idx_row_end_p - idx_row_begin_p, shape()[1]);
  for (size_type idx_row = 0; idx_row < mat_partial.shape()[0]; ++idx_row) {
    mat_partial[idx_row] = mat_[idx_row_begin_p + idx_row];
  }
  return mat_partial;
}
// Row-range slice addressed by iterators, same half-open convention.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const const_iterator& cit_row_begin,
                                  const const_iterator& cit_row_end) const {
  exclusive_range_check_(cit_row_begin);
  inclusive_range_check_(cit_row_end);
  const_iterator_order_check_(cit_row_begin, cit_row_end);
  Matrix mat_partial(static_cast<size_type>(cit_row_end - cit_row_begin),
                     shape()[1]);
  for (size_type idx_row = 0; idx_row < mat_partial.shape()[0]; ++idx_row) {
    mat_partial[idx_row] = *(cit_row_begin + idx_row);
  }
  return mat_partial;
}
// Sub-matrix slice: half-open row and column ranges; column slicing of each
// row is delegated to Vector::operator().
template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const index_type& idx_row_begin,
                                  const index_type& idx_row_end,
                                  const index_type& idx_col_begin,
                                  const index_type& idx_col_end) const {
  exclusive_range_check_(idx_row_begin);
  inclusive_range_check_(idx_row_end);
  exclusive_range_check_(idx_col_begin);
  inclusive_range_check_(idx_col_end);
  index_order_check_(idx_row_begin, idx_row_end);
  index_order_check_(idx_col_begin, idx_col_end);
  size_type idx_row_begin_p = to_positive_index_(idx_row_begin);
  size_type idx_row_end_p = to_positive_index_(idx_row_end);
  size_type idx_col_begin_p = to_positive_index_(idx_col_begin);
  size_type idx_col_end_p = to_positive_index_(idx_col_end);
  Matrix mat_partial(idx_row_end_p - idx_row_begin_p,
                     idx_col_end_p - idx_col_begin_p);
  for (size_type idx_row = 0; idx_row < mat_partial.shape()[0]; ++idx_row) {
    mat_partial[idx_row] = mat_[idx_row_begin_p + idx_row](
        idx_col_begin_p, idx_col_end_p);
  }
  return mat_partial;
}
// ======== Modifiers ========
// Returns a copy with vec_insert added: dim_insert == 0 inserts it as a new
// row before idx_insert; dim_insert == 1 inserts vec_insert[r] into row r as
// a new column. Throws MatrixException on shape mismatch or bad dimension.
// Note the modifiers are functional: *this is never mutated.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::insert(const Vector<Tp>& vec_insert,
                              const dimension_type& dim_insert,
                              const index_type& idx_insert) const {
  if (dim_insert == 0) {
    if (vec_insert.shape()[0] != shape()[1]) {
      std::string err_msg = "Inconsistent insert shape: insert size " +
          std::to_string(vec_insert.shape()[0]) + " != number of columns " +
          std::to_string(shape()[1]) + ".";
      throw MatrixException(err_msg);
    }
    inclusive_range_check_(idx_insert);
    Matrix mat_inserted = *this;
    mat_inserted.mat_.insert(
        mat_inserted.mat_.begin() + to_positive_index_(idx_insert), vec_insert);
    return mat_inserted;
  } else if (dim_insert == 1) {
    if (vec_insert.shape()[0] != shape()[0]) {
      std::string err_msg = "Inconsistent insert shape: insert size " +
          std::to_string(vec_insert.shape()[0]) + " != number of rows " +
          std::to_string(shape()[0]) + ".";
      throw MatrixException(err_msg);
    }
    // Column index validation is delegated to Vector::insert per row.
    Matrix mat_inserted = *this;
    for (size_type idx_row = 0; idx_row < mat_inserted.shape()[0]; ++idx_row) {
      mat_inserted.mat_[idx_row] = mat_inserted.mat_[idx_row].insert(
          vec_insert[idx_row], idx_insert);
    }
    return mat_inserted;
  } else {
    std::string err_msg = "Invalid Dimension: " + std::to_string(dim_insert) +
        " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}
// Matrix variant: dim_insert == 0 splices mat_insert's rows in before
// idx_insert; dim_insert == 1 inserts mat_insert's row r into row r as new
// columns. Same functional style and error handling as above.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::insert(const Matrix& mat_insert,
                              const dimension_type& dim_insert,
                              const index_type& idx_insert) const {
  if (dim_insert == 0) {
    if (mat_insert.shape()[1] != shape()[1]) {
      std::string err_msg = "Inconsistent insert shape: insert size " +
          std::to_string(mat_insert.shape()[1]) + " != number of columns " +
          std::to_string(shape()[1]) + ".";
      throw MatrixException(err_msg);
    }
    inclusive_range_check_(idx_insert);
    Matrix mat_inserted = *this;
    mat_inserted.mat_.insert(
        mat_inserted.mat_.begin() + to_positive_index_(idx_insert),
        mat_insert.mat_.begin(), mat_insert.mat_.end());
    return mat_inserted;
  } else if (dim_insert == 1) {
    if (mat_insert.shape()[0] != shape()[0]) {
      std::string err_msg = "Inconsistent insert shape: insert size " +
          std::to_string(mat_insert.shape()[0]) + " != number of rows " +
          std::to_string(shape()[0]) + ".";
      throw MatrixException(err_msg);
    }
    Matrix mat_inserted = *this;
    for (size_type idx_row = 0; idx_row < mat_inserted.shape()[0]; ++idx_row) {
      mat_inserted.mat_[idx_row] = mat_inserted.mat_[idx_row].insert(
          mat_insert.mat_[idx_row], idx_insert);
    }
    return mat_inserted;
  } else {
    std::string err_msg = "Invalid Dimension: " + std::to_string(dim_insert) +
        " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}
// Returns a copy with one row (dim_remove == 0) or one column
// (dim_remove == 1) erased; column removal is delegated to Vector::remove
// per row, which also performs the column-index validation.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::remove(const dimension_type& dim_remove,
                              const index_type& idx_remove) const {
  if (dim_remove == 0) {
    exclusive_range_check_(idx_remove);
    Matrix mat_removed = *this;
    mat_removed.mat_.erase(mat_removed.mat_.begin() +
        to_positive_index_(idx_remove));
    return mat_removed;
  } else if (dim_remove == 1) {
    Matrix mat_removed = *this;
    for (size_type idx_row = 0; idx_row < mat_removed.shape()[0]; ++idx_row) {
      mat_removed.mat_[idx_row] = mat_removed.mat_[idx_row].remove(idx_remove);
    }
    return mat_removed;
  } else {
    std::string err_msg = "Invalid Dimension: " + std::to_string(dim_remove) +
        " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}
// Range variant: erases the half-open index range [idx_begin, idx_end) of
// rows (dim_remove == 0) or columns (dim_remove == 1).
template <typename Tp>
Matrix<Tp> Matrix<Tp>::remove(const dimension_type& dim_remove,
                              const index_type& idx_begin,
                              const index_type& idx_end) const {
  if (dim_remove == 0) {
    exclusive_range_check_(idx_begin);
    inclusive_range_check_(idx_end);
    index_order_check_(idx_begin, idx_end);
    Matrix mat_removed = *this;
    mat_removed.mat_.erase(
        mat_removed.mat_.begin() + to_positive_index_(idx_begin),
        mat_removed.mat_.begin() + to_positive_index_(idx_end));
    return mat_removed;
  } else if (dim_remove == 1) {
    Matrix mat_removed = *this;
    for (size_type idx_row = 0; idx_row < mat_removed.shape()[0]; ++idx_row) {
      mat_removed.mat_[idx_row] = mat_removed.mat_[idx_row].remove(
          idx_begin, idx_end);
    }
    return mat_removed;
  } else {
    std::string err_msg = "Invalid Dimension: " + std::to_string(dim_remove) +
        " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}
// Returns a copy with elements overwritten by vec_replace starting at the
// given position: dim_replace == 0 writes it horizontally into row
// idx_row_begin (from column idx_col_begin); dim_replace == 1 writes it
// vertically, one element per row starting at row idx_row_begin, column
// idx_col_begin. Writing is clipped at the matrix edge (no growth).
template <typename Tp>
Matrix<Tp> Matrix<Tp>::replace(const Vector<Tp>& vec_replace,
                               const dimension_type& dim_replace,
                               const index_type& idx_row_begin,
                               const index_type& idx_col_begin) const {
  if (dim_replace == 0) {
    exclusive_range_check_(idx_row_begin);
    Matrix mat_replaced = *this;
    mat_replaced[idx_row_begin] = mat_replaced[idx_row_begin].replace(
        vec_replace, idx_col_begin);
    return mat_replaced;
  } else if (dim_replace == 1) {
    exclusive_range_check_(idx_row_begin);
    size_type idx_row_begin_p = to_positive_index_(idx_row_begin);
    Matrix mat_replaced = *this;
    // Stop at whichever runs out first: the replacement vector or the rows.
    for (size_type idx_row = 0; idx_row < vec_replace.shape()[0] &&
        idx_row_begin_p + idx_row < mat_replaced.shape()[0]; ++idx_row) {
      mat_replaced.mat_[idx_row_begin_p + idx_row] =
          mat_replaced.mat_[idx_row_begin_p + idx_row].replace(
              vec_replace[idx_row], idx_col_begin);
    }
    return mat_replaced;
  } else {
    std::string err_msg = "Invalid Dimension: " + std::to_string(dim_replace) +
        " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}
// Matrix variant: overlays mat_replace with its top-left corner at
// (idx_row_begin, idx_col_begin), clipped at this matrix's edges.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::replace(const Matrix& mat_replace,
                               const index_type& idx_row_begin,
                               const index_type& idx_col_begin) const {
  exclusive_range_check_(idx_row_begin);
  size_type idx_row_begin_p = to_positive_index_(idx_row_begin);
  Matrix mat_replaced = *this;
  for (size_type idx_row = 0; idx_row < mat_replace.shape()[0] &&
      idx_row_begin_p + idx_row < mat_replaced.shape()[0]; ++idx_row) {
    mat_replaced.mat_[idx_row_begin_p + idx_row] =
        mat_replaced.mat_[idx_row_begin_p + idx_row].replace(
            mat_replace[idx_row], idx_col_begin);
  }
  return mat_replaced;
}
// namespace internal
namespace internal {
// Transposes mat_from into mat_to, which the caller has already allocated
// with the swapped shape. Matrices smaller than the OpenMP cut-off are
// handled serially; larger ones use a collapsed parallel loop over both
// dimensions.
//
// Robustness fix: bail out on degenerate shapes before taking row addresses.
// operator[] is range-checked, so `&mat_from[0]` / `&mat_to[0]` used to
// throw when transposing an empty matrix instead of producing an empty
// result.
template <typename Tp>
void transpose(const Matrix<Tp>& mat_from, Matrix<Tp>& mat_to) {
  typename Matrix<Tp>::size_type num_rows = mat_to.shape()[0],
                                 num_cols = mat_to.shape()[1],
                                 size_c_to_c_omp = 8;
  if (num_rows == 0 || num_cols == 0) {
    return;  // nothing to transpose; mat_to stays empty
  }
  const Vector<Tp>* ptr_row_from = &mat_from[0];
  Vector<Tp>* ptr_row_to = &mat_to[0];
  if (num_rows + num_cols < 2 * size_c_to_c_omp) {
    // Serial path: column-major walk over the source rows.
    for (typename Matrix<Tp>::size_type idx_col = 0;
         idx_col < num_cols; ++idx_col) {
      const Tp* ptr_col_from = &ptr_row_from[idx_col][0];
      for (typename Matrix<Tp>::size_type idx_row = 0;
           idx_row < num_rows; ++idx_row) {
        Tp* ptr_col_to = &ptr_row_to[idx_row][0];
        ptr_col_to[idx_col] = ptr_col_from[idx_row];
      }
    }
  } else {
#pragma omp parallel for shared(ptr_row_from, ptr_row_to) \
    schedule(auto) collapse(2)
    for (typename Matrix<Tp>::size_type idx_col = 0;
         idx_col < num_cols; ++idx_col) {
      for (typename Matrix<Tp>::size_type idx_row = 0;
           idx_row < num_rows; ++idx_row) {
        // Pointers are re-derived inside the loop so the collapsed loop
        // body stays free of cross-iteration dependencies.
        const Tp* ptr_col_from = &ptr_row_from[idx_col][0];
        Tp* ptr_col_to = &ptr_row_to[idx_row][0];
        ptr_col_to[idx_col] = ptr_col_from[idx_row];
      }
    }
  }
}
}  // namespace internal
// Returns the transpose: allocate the result with swapped dimensions, then
// let the internal helper shuffle the elements.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::transpose() const {
  Matrix mat_result(shape()[1], shape()[0]);
  internal::transpose(*this, mat_result);
  return mat_result;
}
// Short NumPy-style alias for transpose().
template <typename Tp>
Matrix<Tp> Matrix<Tp>::T() const {
  return this->transpose();
}
// Returns a num_rows x num_cols matrix filled row-major from this matrix's
// elements. If the new shape holds fewer elements, the surplus is dropped;
// if it holds more, the extra slots keep vec_cache's value-initialized
// default (Tp()).
template <typename Tp>
Matrix<Tp> Matrix<Tp>::reshape(const size_type& num_rows,
                               const size_type& num_cols) const {
  Vector<Tp> vec_cache(num_rows * num_cols);
  bool is_end_of_vec_cache = false;
  typename Vector<Tp>::size_type idx_vec_cache = 0;
  // Flatten row-major into the cache, stopping once the cache is full.
  for (size_type idx_row = 0; idx_row < shape()[0]; ++idx_row) {
    for (size_type idx_col = 0; idx_col < shape()[1]; ++idx_col) {
      vec_cache[idx_vec_cache] = mat_[idx_row][idx_col];
      ++idx_vec_cache;
      if (idx_vec_cache >= vec_cache.shape()[0]) {
        is_end_of_vec_cache = true;
        break;
      }
    }
    // Flag-based double break: the inner break only exits the column loop.
    if (is_end_of_vec_cache) {
      break;
    }
  }
  // Refill the new shape row-major from the cache.
  Matrix<Tp> mat_reshaped(num_rows, num_cols);
  idx_vec_cache = 0;
  for (index_type idx_row = 0; idx_row < mat_reshaped.shape()[0]; ++idx_row) {
    for (index_type idx_col = 0; idx_col < mat_reshaped.shape()[1];
         ++idx_col) {
      mat_reshaped.mat_[idx_row][idx_col] = vec_cache[idx_vec_cache];
      ++idx_vec_cache;
    }
  }
  return mat_reshaped;
}
// Returns a copy whose rows are randomly permuted; the engine is reseeded
// from the system entropy source on every call, so results vary per call.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::shuffle() const {
  std::random_device seed_source;
  std::default_random_engine engine(seed_source());
  Matrix mat_result = *this;
  std::shuffle(mat_result.mat_.begin(), mat_result.mat_.end(), engine);
  return mat_result;
}
// ======== Arithmetic ========
namespace internal {
// OpenMP row-parallel loop headers for the three operand layouts below.
#define OMP_FOR_3_MAT \
_Pragma("omp parallel for shared(mat_lhs, mat_rhs, mat_ans) schedule(auto)")
#define OMP_FOR_2_MAT_L_ANS \
_Pragma("omp parallel for shared(mat_lhs, val_rhs, mat_ans) schedule(auto)")
#define OMP_FOR_2_MAT_R_ANS \
_Pragma("omp parallel for shared(val_lhs, mat_rhs, mat_ans) schedule(auto)")
// Element-wise matrix OP matrix into a pre-sized mat_ans; row work is
// delegated to Vector's operators and parallelized across rows.
// (Reused below for comparisons too, hence the generic OPERATOR slot.)
#define MAT_OP_MAT(OPERATION, OPERATOR) \
template <typename OpT> \
void OPERATION(const Matrix<OpT>& mat_lhs, const Matrix<OpT>& mat_rhs, \
               Matrix<OpT>& mat_ans) { \
  OMP_FOR_3_MAT \
  for (typename Matrix<OpT>::size_type idx_row = 0; \
       idx_row < mat_ans.shape()[0]; ++idx_row) { \
    mat_ans[idx_row] = (mat_lhs[idx_row] OPERATOR mat_rhs[idx_row]); \
  } \
}
MAT_OP_MAT(add, +)
MAT_OP_MAT(sub, -)
MAT_OP_MAT(mul, *)
MAT_OP_MAT(div, /)
// Element-wise matrix OP scalar.
#define MAT_OP_SCA(OPERATION, OPERATOR) \
template <typename OpT> \
void OPERATION(const Matrix<OpT>& mat_lhs, const OpT& val_rhs, \
               Matrix<OpT>& mat_ans) { \
  OMP_FOR_2_MAT_L_ANS \
  for (typename Matrix<OpT>::size_type idx_row = 0; \
       idx_row < mat_ans.shape()[0]; ++idx_row) { \
    mat_ans[idx_row] = (mat_lhs[idx_row] OPERATOR val_rhs); \
  } \
}
MAT_OP_SCA(add, +)
MAT_OP_SCA(sub, -)
MAT_OP_SCA(mul, *)
MAT_OP_SCA(div, /)
// Element-wise scalar OP matrix (order matters for - and /).
#define SCA_OP_MAT(OPERATION, OPERATOR) \
template <typename OpT> \
void OPERATION(const OpT& val_lhs, const Matrix<OpT>& mat_rhs, \
               Matrix<OpT>& mat_ans) { \
  OMP_FOR_2_MAT_R_ANS \
  for (typename Matrix<OpT>::size_type idx_row = 0; \
       idx_row < mat_ans.shape()[0]; ++idx_row) { \
    mat_ans[idx_row] = (val_lhs OPERATOR mat_rhs[idx_row]); \
  } \
}
SCA_OP_MAT(add, +)
SCA_OP_MAT(sub, -)
SCA_OP_MAT(mul, *)
SCA_OP_MAT(div, /)
}  // namespace internal
// Element-wise sum of two same-shaped matrices.
template <typename AriT>
Matrix<AriT> operator+(const Matrix<AriT>& mat_lhs,
                       const Matrix<AriT>& mat_rhs) {
  Matrix<AriT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
  Matrix<AriT> mat_ans(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::add(mat_lhs, mat_rhs, mat_ans);
  return mat_ans;
}
// Broadcast sum: scalar added to every element.
template <typename AriT>
Matrix<AriT> operator+(const Matrix<AriT>& mat_lhs, const AriT& val_rhs) {
  Matrix<AriT> mat_ans(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::add(mat_lhs, val_rhs, mat_ans);
  return mat_ans;
}
// Broadcast sum, scalar on the left.
template <typename AriT>
Matrix<AriT> operator+(const AriT& val_lhs, const Matrix<AriT>& mat_rhs) {
  Matrix<AriT> mat_ans(mat_rhs.shape()[0], mat_rhs.shape()[1]);
  internal::add(val_lhs, mat_rhs, mat_ans);
  return mat_ans;
}
// Element-wise difference of two same-shaped matrices.
template <typename AriT>
Matrix<AriT> operator-(const Matrix<AriT>& mat_lhs,
                       const Matrix<AriT>& mat_rhs) {
  Matrix<AriT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
  Matrix<AriT> mat_ans(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::sub(mat_lhs, mat_rhs, mat_ans);
  return mat_ans;
}
// Broadcast difference: scalar subtracted from every element.
template <typename AriT>
Matrix<AriT> operator-(const Matrix<AriT>& mat_lhs, const AriT& val_rhs) {
  Matrix<AriT> mat_ans(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::sub(mat_lhs, val_rhs, mat_ans);
  return mat_ans;
}
// Broadcast difference, scalar on the left (val - m[i][j]).
template <typename AriT>
Matrix<AriT> operator-(const AriT& val_lhs, const Matrix<AriT>& mat_rhs) {
  Matrix<AriT> mat_ans(mat_rhs.shape()[0], mat_rhs.shape()[1]);
  internal::sub(val_lhs, mat_rhs, mat_ans);
  return mat_ans;
}
// namespace internal
namespace internal {
template <typename MatT>
void mat_mul(const Matrix<MatT>& mat_lhs, const Matrix<MatT>& mat_rhs,
Matrix<MatT>& mat_ans) {
typename Matrix<MatT>::size_type num_rows = mat_ans.shape()[0],
num_cols = mat_ans.shape()[1];
Matrix<MatT> mat_rhs_t = mat_rhs.transpose();
const Vector<MatT>* ptr_row_lhs = &mat_lhs[0];
const Vector<MatT>* ptr_row_rhs = &mat_rhs_t[0];
Vector<MatT>* ptr_row_ans = &mat_ans[0];
#pragma omp parallel for shared(ptr_row_lhs, ptr_row_rhs, ptr_row_ans) \
schedule(auto) collapse(2)
for (typename Matrix<MatT>::size_type idx_row = 0;
idx_row < num_rows; ++idx_row) {
for (typename Matrix<MatT>::size_type idx_col = 0;
idx_col < num_cols; ++idx_col) {
*(&ptr_row_ans[idx_row][0] + idx_col) +=
(ptr_row_lhs[idx_row] * ptr_row_rhs[idx_col]).sum();
}
}
}
} // namespace internal
// True matrix product (inner dimensions must agree); element-wise
// multiplication is Matrix::times instead.
template <typename AriT>
Matrix<AriT> operator*(const Matrix<AriT>& mat_lhs,
                       const Matrix<AriT>& mat_rhs) {
  if (mat_lhs.shape()[1] != mat_rhs.shape()[0]) {
    std::string err_msg = "Inconsistent shape for matrix multiplication: " +
        std::to_string(mat_lhs.shape()[1]) + " != " +
        std::to_string(mat_rhs.shape()[0]) + ".";
    throw MatrixException(err_msg);
  }
  Matrix<AriT> mat_prod(mat_lhs.shape()[0], mat_rhs.shape()[1]);
  internal::mat_mul(mat_lhs, mat_rhs, mat_prod);
  return mat_prod;
}
// Broadcast product: every element scaled by val_rhs.
template <typename AriT>
Matrix<AriT> operator*(const Matrix<AriT>& mat_lhs, const AriT& val_rhs) {
  Matrix<AriT> mat_prod(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::mul(mat_lhs, val_rhs, mat_prod);
  return mat_prod;
}
// Broadcast product, scalar on the left.
template <typename AriT>
Matrix<AriT> operator*(const AriT& val_lhs, const Matrix<AriT>& mat_rhs) {
  Matrix<AriT> mat_prod(mat_rhs.shape()[0], mat_rhs.shape()[1]);
  internal::mul(val_lhs, mat_rhs, mat_prod);
  return mat_prod;
}
// Element-wise quotient of two same-shaped matrices.
template <typename AriT>
Matrix<AriT> operator/(const Matrix<AriT>& mat_lhs,
                       const Matrix<AriT>& mat_rhs) {
  Matrix<AriT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
  Matrix<AriT> mat_quot(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::div(mat_lhs, mat_rhs, mat_quot);
  return mat_quot;
}
// Broadcast quotient: every element divided by val_rhs.
template <typename AriT>
Matrix<AriT> operator/(const Matrix<AriT>& mat_lhs, const AriT& val_rhs) {
  Matrix<AriT> mat_quot(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::div(mat_lhs, val_rhs, mat_quot);
  return mat_quot;
}
// Broadcast quotient, scalar on the left (val / m[i][j]).
template <typename AriT>
Matrix<AriT> operator/(const AriT& val_lhs, const Matrix<AriT>& mat_rhs) {
  Matrix<AriT> mat_quot(mat_rhs.shape()[0], mat_rhs.shape()[1]);
  internal::div(val_lhs, mat_rhs, mat_quot);
  return mat_quot;
}
// Compound assignments all delegate to the corresponding binary operator
// and reassign, so they inherit its shape checks and exceptions.
// Note *= is the true matrix product, which may change this matrix's shape.
template <typename Tp>
void Matrix<Tp>::operator+=(const Matrix<Tp>& mat_rhs) {
  (*this) = (*this) + mat_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator+=(const Tp& val_rhs) {
  (*this) = (*this) + val_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator-=(const Matrix<Tp>& mat_rhs) {
  (*this) = (*this) - mat_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator-=(const Tp& val_rhs) {
  (*this) = (*this) - val_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator*=(const Matrix<Tp>& mat_rhs) {
  (*this) = (*this) * mat_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator*=(const Tp& val_rhs) {
  (*this) = (*this) * val_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator/=(const Matrix<Tp>& mat_rhs) {
  (*this) = (*this) / mat_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator/=(const Tp& val_rhs) {
  (*this) = (*this) / val_rhs;
}
// Hadamard (element-wise) product, as opposed to operator* above.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::times(const Matrix& mat_rhs) const {
  shape_consistence_check_(shape(), mat_rhs.shape());
  Matrix mat_prod(shape()[0], shape()[1]);
  internal::mul(*this, mat_rhs, mat_prod);
  return mat_prod;
}
// Sum of all elements: per-row sums (Vector::sum) combined under an OpenMP
// reduction. NOTE(review): the OpenMP reduction(+) clause requires Tp to be
// a type OpenMP can reduce (arithmetic); confirm for non-scalar Tp.
template <typename Tp>
Tp Matrix<Tp>::sum() const {
  Tp sum_val = Tp();
#pragma omp parallel for schedule(auto) reduction(+ : sum_val)
  for (size_type idx_row = 0; idx_row < shape()[0]; ++idx_row) {
    sum_val = sum_val + mat_[idx_row].sum();
  }
  return sum_val;
}
// namespace internal
namespace internal {
// Writes each row's sum into vec_sum (pre-sized to the row count).
template <typename SumT>
void sum_of_dim_one(const Matrix<SumT>& mat, Vector<SumT>& vec_sum) {
  for (typename Matrix<SumT>::size_type idx_row = 0;
       idx_row < mat.shape()[0]; ++idx_row) {
    vec_sum[idx_row] = mat[idx_row].sum();
  }
}
}  // namespace internal
// Dimension-wise sum: dim_sum == 0 sums down the columns (implemented as
// row sums of the transpose), dim_sum == 1 sums along each row. Any other
// dimension throws MatrixException.
template <typename Tp>
Vector<Tp> Matrix<Tp>::sum(const dimension_type& dim_sum) const {
  if (dim_sum == 0) {
    Vector<Tp> vec_sum(shape()[1]);
    internal::sum_of_dim_one(transpose(), vec_sum);
    return vec_sum;
  } else if (dim_sum == 1) {
    Vector<Tp> vec_sum(shape()[0]);
    internal::sum_of_dim_one(*this, vec_sum);
    return vec_sum;
  } else {
    std::string err_msg = "Invalid Dimension: " + std::to_string(dim_sum) +
        " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}
// ======== Comparisons ========
// Reuse the element-wise arithmetic macros to stamp out the comparison
// kernels; each produces a Matrix<CmpT> of element-wise comparison results.
namespace internal {
MAT_OP_MAT(eq, ==)
MAT_OP_MAT(ne, !=)
MAT_OP_MAT(lt, <)
MAT_OP_MAT(le, <=)
MAT_OP_MAT(gt, >)
MAT_OP_MAT(ge, >=)
MAT_OP_SCA(eq, ==)
MAT_OP_SCA(ne, !=)
MAT_OP_SCA(lt, <)
MAT_OP_SCA(le, <=)
MAT_OP_SCA(gt, >)
MAT_OP_SCA(ge, >=)
SCA_OP_MAT(eq, ==)
SCA_OP_MAT(ne, !=)
SCA_OP_MAT(lt, <)
SCA_OP_MAT(le, <=)
SCA_OP_MAT(gt, >)
SCA_OP_MAT(ge, >=)
// The macros are implementation details; keep them out of client code.
#undef MAT_OP_MAT
#undef MAT_OP_SCA
#undef SCA_OP_MAT
}  // namespace internal
template <typename CmpT>
Matrix<CmpT> operator==(const Matrix<CmpT>& mat_lhs,
const Matrix<CmpT>& mat_rhs) {
Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
Matrix<CmpT> mat_eq(mat_lhs.shape()[0], mat_lhs.shape()[1]);
internal::eq(mat_lhs, mat_rhs, mat_eq);
return mat_eq;
}
template <typename CmpT>
Matrix<CmpT> operator==(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) {
Matrix<CmpT> mat_eq(mat_lhs.shape()[0], mat_lhs.shape()[1]);
internal::eq(mat_lhs, val_rhs, mat_eq);
return mat_eq;
}
template <typename CmpT>
Matrix<CmpT> operator==(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) {
Matrix<CmpT> mat_eq(mat_rhs.shape()[0], mat_rhs.shape()[1]);
internal::eq(val_lhs, mat_rhs, mat_eq);
return mat_eq;
}
template <typename CmpT>
Matrix<CmpT> operator!=(const Matrix<CmpT>& mat_lhs,
const Matrix<CmpT>& mat_rhs) {
Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
Matrix<CmpT> mat_ne(mat_lhs.shape()[0], mat_lhs.shape()[1]);
internal::ne(mat_lhs, mat_rhs, mat_ne);
return mat_ne;
}
template <typename CmpT>
Matrix<CmpT> operator!=(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) {
Matrix<CmpT> mat_ne(mat_lhs.shape()[0], mat_lhs.shape()[1]);
internal::ne(mat_lhs, val_rhs, mat_ne);
return mat_ne;
}
template <typename CmpT>
Matrix<CmpT> operator!=(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) {
Matrix<CmpT> mat_ne(mat_rhs.shape()[0], mat_rhs.shape()[1]);
internal::ne(val_lhs, mat_rhs, mat_ne);
return mat_ne;
}
template <typename CmpT>
Matrix<CmpT> operator<(const Matrix<CmpT>& mat_lhs,
const Matrix<CmpT>& mat_rhs) {
Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
Matrix<CmpT> mat_lt(mat_lhs.shape()[0], mat_lhs.shape()[1]);
internal::lt(mat_lhs, mat_rhs, mat_lt);
return mat_lt;
}
template <typename CmpT>
Matrix<CmpT> operator<(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) {
Matrix<CmpT> mat_lt(mat_lhs.shape()[0], mat_lhs.shape()[1]);
internal::lt(mat_lhs, val_rhs, mat_lt);
return mat_lt;
}
template <typename CmpT>
Matrix<CmpT> operator<(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) {
Matrix<CmpT> mat_lt(mat_rhs.shape()[0], mat_rhs.shape()[1]);
internal::lt(val_lhs, mat_rhs, mat_lt);
return mat_lt;
}
// Element-wise less-or-equal of two shape-consistent matrices.
template <typename CmpT>
Matrix<CmpT> operator<=(const Matrix<CmpT>& mat_lhs,
                        const Matrix<CmpT>& mat_rhs) {
  Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
  const auto n_rows = mat_lhs.shape()[0];
  const auto n_cols = mat_lhs.shape()[1];
  Matrix<CmpT> result(n_rows, n_cols);
  internal::le(mat_lhs, mat_rhs, result);
  return result;
}
// Element-wise less-or-equal of a matrix against a scalar.
template <typename CmpT>
Matrix<CmpT> operator<=(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) {
  const auto n_rows = mat_lhs.shape()[0];
  const auto n_cols = mat_lhs.shape()[1];
  Matrix<CmpT> result(n_rows, n_cols);
  internal::le(mat_lhs, val_rhs, result);
  return result;
}
// Mirrored operand order: scalar on the left, matrix on the right.
template <typename CmpT>
Matrix<CmpT> operator<=(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) {
  const auto n_rows = mat_rhs.shape()[0];
  const auto n_cols = mat_rhs.shape()[1];
  Matrix<CmpT> result(n_rows, n_cols);
  internal::le(val_lhs, mat_rhs, result);
  return result;
}
// Element-wise greater-than of two shape-consistent matrices.
template <typename CmpT>
Matrix<CmpT> operator>(const Matrix<CmpT>& mat_lhs,
                       const Matrix<CmpT>& mat_rhs) {
  Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
  const auto n_rows = mat_lhs.shape()[0];
  const auto n_cols = mat_lhs.shape()[1];
  Matrix<CmpT> result(n_rows, n_cols);
  internal::gt(mat_lhs, mat_rhs, result);
  return result;
}
// Element-wise greater-than of a matrix against a scalar.
template <typename CmpT>
Matrix<CmpT> operator>(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) {
  const auto n_rows = mat_lhs.shape()[0];
  const auto n_cols = mat_lhs.shape()[1];
  Matrix<CmpT> result(n_rows, n_cols);
  internal::gt(mat_lhs, val_rhs, result);
  return result;
}
// Mirrored operand order: scalar on the left, matrix on the right.
template <typename CmpT>
Matrix<CmpT> operator>(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) {
  const auto n_rows = mat_rhs.shape()[0];
  const auto n_cols = mat_rhs.shape()[1];
  Matrix<CmpT> result(n_rows, n_cols);
  internal::gt(val_lhs, mat_rhs, result);
  return result;
}
// Element-wise greater-or-equal of two shape-consistent matrices.
template <typename CmpT>
Matrix<CmpT> operator>=(const Matrix<CmpT>& mat_lhs,
                        const Matrix<CmpT>& mat_rhs) {
  Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
  const auto n_rows = mat_lhs.shape()[0];
  const auto n_cols = mat_lhs.shape()[1];
  Matrix<CmpT> result(n_rows, n_cols);
  internal::ge(mat_lhs, mat_rhs, result);
  return result;
}
// Element-wise greater-or-equal of a matrix against a scalar.
template <typename CmpT>
Matrix<CmpT> operator>=(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) {
  const auto n_rows = mat_lhs.shape()[0];
  const auto n_cols = mat_lhs.shape()[1];
  Matrix<CmpT> result(n_rows, n_cols);
  internal::ge(mat_lhs, val_rhs, result);
  return result;
}
// Mirrored operand order: scalar on the left, matrix on the right.
template <typename CmpT>
Matrix<CmpT> operator>=(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) {
  const auto n_rows = mat_rhs.shape()[0];
  const auto n_cols = mat_rhs.shape()[1];
  Matrix<CmpT> result(n_rows, n_cols);
  internal::ge(val_lhs, mat_rhs, result);
  return result;
}
// Deep equality: shapes must match and every row must compare equal.
// NOTE(review): the `ulp` tolerance parameter is accepted but is not
// forwarded to the per-row `nequal` call below — confirm whether the row
// type's nequal applies a default tolerance or whether `ulp` should be
// passed through explicitly.
template <typename Tp>
bool Matrix<Tp>::equal(const Matrix& mat_rhs, std::size_t ulp) {
  // Differing shapes can never be equal.
  if (shape()[0] != mat_rhs.shape()[0] || shape()[1] != mat_rhs.shape()[1]) {
    return false;
  }
  // Short-circuit on the first mismatching row.
  for (size_type idx_row = 0; idx_row < shape()[0]; ++idx_row) {
    if (mat_[idx_row].nequal(mat_rhs.mat_[idx_row])) {
      return false;
    }
  }
  return true;
}
// Logical negation of equal(): true when shapes differ or any row differs.
template <typename Tp>
bool Matrix<Tp>::nequal(const Matrix& mat_rhs, std::size_t ulp) {
  const bool matrices_match = equal(mat_rhs, ulp);
  return !matrices_match;
}
// Largest element over the whole matrix.
// NOTE(review): reads mat_[0] unconditionally, so an empty matrix is a
// precondition violation — confirm callers guarantee at least one row.
template <typename Tp>
Tp Matrix<Tp>::max() const {
  Tp best = mat_[0].max();
  for (size_type row = 1; row < shape()[0]; ++row) {
    const Tp row_max = mat_[row].max();
    if (row_max > best) {
      best = row_max;
    }
  }
  return best;
}
// namespace internal
namespace internal {
template <typename MaxT>
void max_of_dim_one(const Matrix<MaxT>& mat, Vector<MaxT>& max_vec) {
for (typename Matrix<MaxT>::size_type idx_row = 0;
idx_row < mat.shape()[0]; ++idx_row) {
max_vec[idx_row] = mat[idx_row].max();
}
}
template <typename MinT>
void min_of_dim_one(const Matrix<MinT>& mat, Vector<MinT>& min_vec) {
for (typename Matrix<MinT>::size_type idx_row = 0;
idx_row < mat.shape()[0]; ++idx_row) {
min_vec[idx_row] = mat[idx_row].min();
}
}
} // namespace internal
// Reduce with max along one dimension: dim_max == 0 yields the per-column
// maxima (length == column count, via the transpose), dim_max == 1 yields
// the per-row maxima (length == row count). Any other dimension throws
// MatrixException.
template <typename Tp>
Vector<Tp> Matrix<Tp>::max(const dimension_type& dim_max) const {
  if (dim_max == 0) {
    Vector<Tp> max_vec(shape()[1]);
    internal::max_of_dim_one(transpose(), max_vec);
    // BUG FIX: this branch previously fell off the end of a value-returning
    // function without returning max_vec (undefined behavior); the sibling
    // min(dim_min) overload returns here, so max must as well.
    return max_vec;
  } else if (dim_max == 1) {
    Vector<Tp> max_vec(shape()[0]);
    internal::max_of_dim_one(*this, max_vec);
    return max_vec;
  } else {
    std::string err_msg = "Invalid Dimension: " + std::to_string(dim_max) +
        " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}
// Smallest element over the whole matrix.
// NOTE(review): reads mat_[0] unconditionally, so an empty matrix is a
// precondition violation — confirm callers guarantee at least one row.
template <typename Tp>
Tp Matrix<Tp>::min() const {
  Tp best = mat_[0].min();
  for (size_type row = 1; row < shape()[0]; ++row) {
    const Tp row_min = mat_[row].min();
    if (row_min < best) {
      best = row_min;
    }
  }
  return best;
}
// Reduce with min along one dimension: dim_min == 0 yields the per-column
// minima (via the transpose), dim_min == 1 yields the per-row minima.
// Any other dimension throws MatrixException.
template <typename Tp>
Vector<Tp> Matrix<Tp>::min(const dimension_type& dim_min) const {
  if (dim_min == 1) {
    Vector<Tp> min_vec(shape()[0]);
    internal::min_of_dim_one(*this, min_vec);
    return min_vec;
  }
  if (dim_min == 0) {
    Vector<Tp> min_vec(shape()[1]);
    internal::min_of_dim_one(transpose(), min_vec);
    return min_vec;
  }
  std::string err_msg = "Invalid Dimension: " + std::to_string(dim_min) +
      " != 0 or 1.";
  throw MatrixException(err_msg);
}
// ======== IO ========
template <typename MatT, typename CharT, typename Traits>
std::basic_ostream<CharT, Traits>& operator<<(
std::basic_ostream<CharT, Traits>& os, const Matrix<MatT>& mat) {
if (mat.shape()[0] == 0) {
os << "[[]]";
return os;
}
if (mat.shape()[0] == 1) {
os << "[" << mat.mat_[0] << "]";
return os;
}
os << "[" << mat.mat_[0] << "\n";
for (typename Matrix<MatT>::size_type idx_row = 1;
idx_row < mat.shape()[0] - 1; ++idx_row) {
os << " " << mat.mat_[idx_row] << "\n";
}
os << " " << mat.mat_[mat.shape()[0] - 1] << "]";
return os;
}
template <typename MatT, typename CharT, typename Traits>
std::basic_istream<CharT, Traits>& operator>>(
std::basic_istream<CharT, Traits>& is, Matrix<MatT>& mat) {
for (typename Matrix<MatT>::size_type idx_row = 0;
idx_row < mat.shape()[0]; ++idx_row) {
is >> mat.mat_[idx_row];
}
return is;
}
// ======== Helper Functions ========
// Normalize a possibly-negative index: negative values count back from the
// end (Python-style), non-negative values pass through unchanged.
template <typename Tp>
typename Matrix<Tp>::index_type Matrix<Tp>::to_positive_index_(
    const size_type& size, const index_type& index) {
  if (index < 0) {
    return size + index;
  }
  return index;
}
// Validate that a (possibly negative) row index lies in [0, size) after
// normalization; throws MatrixException on violation.
template <typename Tp>
void Matrix<Tp>::exclusive_range_check_(const size_type& size,
                                        const index_type& index) {
  size_type pos_index = to_positive_index_(size, index);
  if (pos_index >= size) {
    std::string err_msg = "Out-of-Range: row index " +
        std::to_string(index) + " is out of range [0, " +
        std::to_string(size) + ").";
    throw MatrixException(err_msg);
  }
}
// Iterator overload: it must lie in [it_begin, it_end).
template <typename Tp>
void Matrix<Tp>::exclusive_range_check_(const iterator& it_begin,
                                        const iterator& it_end,
                                        const iterator& it) {
  if (it < it_begin || it >= it_end) {
    std::string err_msg =
        "Out-of-Range: row iterator is out of the range [begin(), end()).";
    throw MatrixException(err_msg);
  }
}
// Const-iterator overload: cit must lie in [cit_begin, cit_end).
template <typename Tp>
void Matrix<Tp>::exclusive_range_check_(const const_iterator& cit_begin,
                                        const const_iterator& cit_end,
                                        const const_iterator& cit) {
  if (cit < cit_begin || cit >= cit_end) {
    std::string err_msg =
        "Out-of-Range: row const_iterator is out of the range [begin(), end()).";
    throw MatrixException(err_msg);
  }
}
// Validate that a (possibly negative) row index lies in [0, size] after
// normalization — the end position is allowed (useful for insert points);
// throws MatrixException on violation.
template <typename Tp>
void Matrix<Tp>::inclusive_range_check_(const size_type& size,
                                        const index_type& index) {
  size_type pos_index = to_positive_index_(size, index);
  if (pos_index > size) {
    std::string err_msg = "Out-of-Range: row index " +
        std::to_string(index) + " is out of range [0, " +
        std::to_string(size) + "].";
    throw MatrixException(err_msg);
  }
}
// Iterator overload: it must lie in [it_begin, it_end].
template <typename Tp>
void Matrix<Tp>::inclusive_range_check_(const iterator& it_begin,
                                        const iterator& it_end,
                                        const iterator& it) {
  if (it < it_begin || it > it_end) {
    std::string err_msg =
        "Out-of-Range: row iterator is out of the range [begin(), end()].";
    throw MatrixException(err_msg);
  }
}
// Const-iterator overload: cit must lie in [cit_begin, cit_end].
template <typename Tp>
void Matrix<Tp>::inclusive_range_check_(const const_iterator& cit_begin,
                                        const const_iterator& cit_end,
                                        const const_iterator& cit) {
  if (cit < cit_begin || cit > cit_end) {
    std::string err_msg =
        "Out-of-Range: row const_iterator is out of the range [cbegin(), cend()].";
    throw MatrixException(err_msg);
  }
}
// Throws MatrixException when the two shapes differ in either dimension.
template <typename Tp>
void Matrix<Tp>::shape_consistence_check_(const Vector<size_type>& shape_lhs,
                                          const Vector<size_type>& shape_rhs) {
  const bool same_shape = (shape_lhs[0] == shape_rhs[0]) &&
                          (shape_lhs[1] == shape_rhs[1]);
  if (same_shape) {
    return;
  }
  std::string err_msg = "Inconsistent shape: [" +
      std::to_string(shape_lhs[0]) + ", " +
      std::to_string(shape_lhs[1]) + "] != [" +
      std::to_string(shape_rhs[0]) + ", " +
      std::to_string(shape_rhs[1]) + "].";
  throw MatrixException(err_msg);
}
// Validate that idx_begin does not come after idx_end once both are
// normalized to non-negative positions; throws MatrixException otherwise.
template <typename Tp>
void Matrix<Tp>::index_order_check_(const size_type& size,
                                    const index_type& idx_begin,
                                    const index_type& idx_end) {
  if (to_positive_index_(size, idx_begin) >
      to_positive_index_(size, idx_end)) {
    std::string err_msg = "Invalid Row Index Order: begin " +
        std::to_string(to_positive_index_(size, idx_begin)) + " > end " +
        std::to_string(to_positive_index_(size, idx_end)) + ".";
    throw MatrixException(err_msg);
  }
}
// ---- Member-level conveniences: forward to the static checkers above,
// ---- supplying this matrix's own row count or iterators.
template <typename Tp>
typename Matrix<Tp>::index_type Matrix<Tp>::to_positive_index_(
    const index_type& index) const {
  return to_positive_index_(mat_.size(), index);
}
template <typename Tp>
void Matrix<Tp>::exclusive_range_check_(const index_type& index) const {
  exclusive_range_check_(mat_.size(), index);
}
template <typename Tp>
void Matrix<Tp>::exclusive_range_check_(const iterator& it) {
  exclusive_range_check_(begin(), end(), it);
}
template <typename Tp>
void Matrix<Tp>::exclusive_range_check_(const const_iterator& cit) const {
  exclusive_range_check_(cbegin(), cend(), cit);
}
template <typename Tp>
void Matrix<Tp>::inclusive_range_check_(const index_type& index) const {
  inclusive_range_check_(mat_.size(), index);
}
template <typename Tp>
void Matrix<Tp>::inclusive_range_check_(const iterator& it) {
  inclusive_range_check_(begin(), end(), it);
}
template <typename Tp>
void Matrix<Tp>::inclusive_range_check_(const const_iterator& cit) const {
  inclusive_range_check_(cbegin(), cend(), cit);
}
template <typename Tp>
void Matrix<Tp>::index_order_check_(const index_type& idx_begin,
                                    const index_type& idx_end) const {
  index_order_check_(mat_.size(), idx_begin, idx_end);
}
// Iterator variants convert iterator offsets to indices before delegating.
template <typename Tp>
void Matrix<Tp>::iterator_order_check_(const iterator& it_begin,
                                       const iterator& it_end) {
  index_order_check_(mat_.size(),
                     static_cast<index_type>(it_begin - mat_.begin()),
                     static_cast<index_type>(it_end - mat_.begin()));
}
template <typename Tp>
void Matrix<Tp>::const_iterator_order_check_(
    const const_iterator& cit_begin, const const_iterator& cit_end) const {
  index_order_check_(mat_.size(),
                     static_cast<index_type>(cit_begin - mat_.cbegin()),
                     static_cast<index_type>(cit_end - mat_.cbegin()));
}
// ======== End of class Matrix ========
// Exception type carrying a human-readable message for Matrix errors.
// NOTE(review): does not derive from std::exception, so callers must catch
// MatrixException explicitly — confirm this is intentional.
class MatrixException {
 public:
  MatrixException() noexcept {}
  MatrixException(const MatrixException& other) noexcept : msg_(other.msg_) {}
  explicit MatrixException(const std::string& message) noexcept
      : msg_(message) {}
  explicit MatrixException(const char* message) noexcept : msg_(message) {}
  MatrixException& operator=(const MatrixException& other) noexcept {
    msg_ = other.msg_;
    return *this;
  }
  MatrixException& operator=(const std::string& msg_copy) noexcept {
    msg_ = msg_copy;
    return *this;
  }
  MatrixException& operator=(const char* msg_copy) noexcept {
    msg_ = msg_copy;
    return *this;
  }
  ~MatrixException() noexcept {}
  // Message accessor; mirrors the std::exception::what() signature.
  const char* what() const noexcept { return msg_.c_str(); }

 protected:
  std::string msg_;
};
} // namespace tensor
#endif // TENSOR_MATRIX_H_ |
axpy_itlmic_kernel.c | //
// Created by Yonghong Yan on 1/7/16.
//
#ifdef __cplusplus
extern "C" {
#endif
#include "axpy.h"
#include <offload.h>
#include <homp.h>
#ifdef USE_INTEL_MKL
#include <mkl.h>
#endif
/* Offload an AXPY update (y = a*x + y) over the index range
 * [0, length_n - start_n) to the Intel MIC coprocessor identified by
 * off->dev->sysid. Depending on ITLMIC_COMBINED_OFFLOADING the device
 * buffers are either reused from a prior mapping or transferred per call.
 * The large #if 0 section below is a disabled timing experiment. */
void axpy_itlmic_wrapper(omp_offloading_t *off, long start_n, long length_n,REAL a,REAL *x,REAL *y) {
int sysid = off->dev->sysid;
int num_cores = off->dev->num_cores;
int i;
// printf("x: %X, y: %X: %d\n", x, y, (length_n - start_n)*sizeof(REAL));
#ifndef ITLMIC_COMBINED_OFFLOADING
/* length(0) alloc_if(0) free_if(0): reuse buffers already mapped on the
 * device — no data is transferred here. */
#pragma offload target(mic:sysid) in (x: length(0) alloc_if(0) free_if(0)) \
in (y: length(0) alloc_if(0) free_if(0))
#else
/* Combined offloading: copy x in and y in/out on every invocation,
 * 64-byte aligned for vectorization. */
#pragma offload target(mic:sysid) in (x: length(length_n-start_n) align(64)) \
inout (y: length(length_n-start_n) align(64))
#endif
{
#ifdef USE_INTEL_MKL
/* NOTE(review): cblas_saxpy is the single-precision AXPY; if REAL is
 * double (defined in axpy.h, not visible here) this should be
 * cblas_daxpy — confirm REAL's definition. */
cblas_saxpy(length_n-start_n, a, x, 1, y, 1);
#else
#pragma omp parallel for simd private(i) num_threads(num_cores)
for (i = 0; i < length_n-start_n; i++) {
y[i] = x[i] * a + y[i];
}
#endif
}
// printf("x: %X, y: %X: %d\n", x, y, (length_n - start_n)*sizeof(REAL));
/* Disabled experiment: separate alloc / compute / free offloads with
 * per-phase wall-clock timing and optional event instrumentation. */
#if 0
// omp_event_t *events = off->events;
// omp_dev_stream_t *stream = off->stream;
// omp_offloading_info_t * off_info = off->off_info;
double alloc_time = omp_get_wtime();
#if defined (OMP_BREAKDOWN_TIMING)
omp_event_record_start(&events[acc_mapto_event_index], stream, "ACC_MAPTO", "Accumulated time for mapto data movement for all array");
#endif
#pragma offload target(mic:sysid) in (x: length(length_n-start_n) alloc_if(1) free_if(0)) \
in (y: length(length_n-start_n) alloc_if(1) free_if(0))
{
}
#if defined (OMP_BREAKDOWN_TIMING)
omp_event_record_stop(&events[acc_mapto_event_index]);
omp_event_record_start(&events[acc_kernel_exe_event_index], stream, "KERN", "Time for kernel (%s) execution", off_info->name);
#endif
alloc_time = omp_get_wtime() - alloc_time;
double kernel_time = omp_get_wtime();
#pragma offload target(mic:sysid) nocopy (x: length(length_n-start_n) alloc_if(0) free_if(0)) \
nocopy (y: length(length_n-start_n) alloc_if(0) free_if(0))
#pragma omp parallel for simd num_threads(240)
for (i = 0; i < length_n-start_n; i++) {
y[i] = x[i] * a + y[i];
}
#if defined (OMP_BREAKDOWN_TIMING)
omp_event_record_stop(&events[acc_kernel_exe_event_index]);
omp_event_record_start(&events[acc_mapfrom_event_index], stream, "ACC_MAPFROM", "Accumulated time for mapfrom data movement for all array");
#endif
kernel_time = omp_get_wtime() - kernel_time;
double free_time = omp_get_wtime();
#pragma offload target(mic:sysid) nocopy (x: length(length_n-start_n) alloc_if(0) free_if(1)) \
out (y: length(length_n-start_n) alloc_if(0) free_if(1))
{
}
#if defined (OMP_BREAKDOWN_TIMING)
omp_event_record_stop(&events[acc_mapfrom_event_index]);
#endif
free_time = omp_get_wtime() - free_time;
#endif
// double walltime = omp_get_wtime() - start_timer;
// printf("PASS axpy\n\n");
// printf("Alloc time = %.2f sec\n\n", alloc_time);
// printf("Kernel time = %.2f sec\n\n", kernel_time);
// printf("Free time = %.2f sec\n\n", free_time);
// printf("Total time = %.2f sec\n\n", walltime);
}
#ifdef __cplusplus
}
#endif
|
comms.h | /*
//@HEADER
// *****************************************************************************
//
// XtraPuLP: Xtreme-Scale Graph Partitioning using Label Propagation
// Copyright (2016) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact George M. Slota (gmslota@sandia.gov)
// Siva Rajamanickam (srajama@sandia.gov)
// Kamesh Madduri (madduri@cse.psu.edu)
//
// *****************************************************************************
//@HEADER
*/
#ifndef _COMMS_H_
#define _COMMS_H_
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include "xtrapulp.h"
#include "util.h"
extern int procid, nprocs;
extern bool verbose, debug, verify;
#define MAX_SEND_SIZE 2147483648
#define THREAD_QUEUE_SIZE 1024
/* Per-rank scratch state for the MPI all-to-all exchanges below. The
 * int32_t count/displacement arrays feed MPI_Alltoall(v); the uint64_t
 * *_temp variants accumulate totals that may exceed 32 bits before the
 * exchange is split into rounds. */
struct mpi_data_t {
int32_t* sendcounts;       /* per-destination counts for one round */
uint64_t* sendcounts_temp; /* 64-bit per-destination totals */
int32_t* recvcounts;       /* per-source counts for one round */
uint64_t* recvcounts_temp; /* 64-bit per-source totals */
int32_t* sdispls;          /* send displacements for MPI_Alltoallv */
int32_t* rdispls;          /* recv displacements for MPI_Alltoallv */
int32_t* sdispls_cpy;      /* mutable copy used as a write cursor */
uint64_t* sdispls_temp;    /* 64-bit send displacements / atomic cursors */
uint64_t* sendbuf_vert;    /* vertex ids staged for sending */
int32_t* sendbuf_data;     /* per-vertex payload staged for sending */
uint64_t* recvbuf_vert;    /* received vertex ids */
int32_t* recvbuf_data;     /* received per-vertex payload */
uint64_t total_recv;       /* total elements received this exchange */
uint64_t total_send;       /* total elements sent this exchange */
uint64_t global_queue_size; /* global sum of queued work (Allreduce) */
};
/* Double-buffered work queue: `queue` is the current frontier, `queue_next`
 * collects the next frontier, `queue_send` stages ghost vertices destined
 * for other ranks. */
struct queue_data_t {
uint64_t* queue;      /* current frontier */
uint64_t* queue_next; /* next frontier being built */
uint64_t* queue_send; /* ghost vertices to exchange */
uint64_t queue_size;  /* valid entries in queue */
uint64_t next_size;   /* valid entries in queue_next */
uint64_t send_size;   /* valid entries in queue_send */
};
/* Per-thread staging buffers (THREAD_QUEUE_SIZE entries) that are flushed
 * into the shared queue_data_t with a single atomic reservation, reducing
 * contention on the shared counters. */
struct thread_queue_t {
int32_t tid;               /* owning thread id */
uint64_t* thread_queue;    /* local buffer for queue_next entries */
uint64_t* thread_send;     /* local buffer for queue_send entries */
uint64_t thread_queue_size; /* fill level of thread_queue */
uint64_t thread_send_size;  /* fill level of thread_send */
} ;
/* Per-thread staging for (vertex, data, destination-rank) triples that are
 * flushed into mpi_data_t's send buffers with atomic cursor reservations. */
struct thread_comm_t {
int32_t tid;                  /* owning thread id */
bool* v_to_rank;              /* per-rank "already counted" flags */
uint64_t* sendcounts_thread;  /* per-rank counts staged locally */
uint64_t* sendbuf_vert_thread; /* staged vertex ids */
int32_t* sendbuf_data_thread ; /* staged payload values */
int32_t* sendbuf_rank_thread; /* staged destination ranks */
uint64_t* thread_starts;      /* reserved write offsets per rank */
uint64_t thread_queue_size;   /* fill level of the staged triples */
};
void init_queue_data(dist_graph_t* g, queue_data_t* q);
void clear_queue_data(queue_data_t* q);
void init_comm_data(mpi_data_t* comm);
void clear_comm_data(mpi_data_t* comm);
void init_thread_queue(thread_queue_t* tq);
void clear_thread_queue(thread_queue_t* tq);
void init_thread_comm(thread_comm_t* tc);
void clear_thread_comm(thread_comm_t* tc);
void init_sendbuf_vid_data(mpi_data_t* comm);
void clear_recvbuf_vid_data(mpi_data_t* comm);
inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q);
inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm,
queue_data_t* q);
inline void update_sendcounts_thread(dist_graph_t* g,
thread_comm_t* tc, uint64_t vert_index);
inline void update_vid_data_queues(dist_graph_t* g,
thread_comm_t* tc, mpi_data_t* comm,
uint64_t vert_index, int32_t* data);
inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q,
uint64_t vertex_id);
inline void empty_queue(thread_queue_t* tq, queue_data_t* q);
inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q,
uint64_t vertex_id);
inline void empty_send(thread_queue_t* tq, queue_data_t* q);
inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm,
uint64_t vertex_id, int32_t data_val, int32_t send_rank);
inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm);
/* Exchange queued ghost vertices with their owner ranks. The global queue
 * size is reduced first so the transfer can be split into `num_comms`
 * rounds, each staying under MAX_SEND_SIZE elements. Received vertices are
 * appended after the locally produced next-frontier entries, then the
 * queue/queue_next buffers are swapped. */
inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q)
{
comm->global_queue_size = 0;
uint64_t task_queue_size = q->next_size + q->send_size;
MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1,
MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1;
uint64_t sum_recv = 0;
for (uint64_t c = 0; c < num_comms; ++c)
{
/* Slice [send_begin, send_end) of queue_send handled this round. */
uint64_t send_begin = (q->send_size * c) / num_comms;
uint64_t send_end = (q->send_size * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = q->send_size;
for (int32_t i = 0; i < nprocs; ++i)
{
comm->sendcounts[i] = 0;
comm->recvcounts[i] = 0;
}
/* Count how many queued ghosts go to each owner rank. */
for (uint64_t i = send_begin; i < send_end; ++i)
{
uint64_t ghost_index = q->queue_send[i] - g->n_local;
uint64_t ghost_task = g->ghost_tasks[ghost_index];
++comm->sendcounts[ghost_task];
}
MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);
comm->sdispls[0] = 0;
comm->sdispls_cpy[0] = 0;
comm->rdispls[0] = 0;
for (int32_t i = 1; i < nprocs; ++i)
{
comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
comm->sdispls_cpy[i] = comm->sdispls[i];
}
int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
comm->sendbuf_vert = (uint64_t*)malloc((uint64_t)(cur_send+1)*sizeof(uint64_t));
if (comm->sendbuf_vert == NULL)
throw_err("exchange_verts(), unable to allocate comm buffers", procid);
/* Pack global vertex ids, bucketed by owner via the sdispls_cpy cursor. */
for (uint64_t i = send_begin; i < send_end; ++i)
{
uint64_t ghost_index = q->queue_send[i] - g->n_local;
uint64_t ghost_task = g->ghost_tasks[ghost_index];
uint64_t vert = g->ghost_unmap[ghost_index];
comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert;
}
/* Receive directly into queue_next, after local entries + prior rounds. */
MPI_Alltoallv(comm->sendbuf_vert,
comm->sendcounts, comm->sdispls, MPI_UINT64_T,
q->queue_next+q->next_size+sum_recv,
comm->recvcounts, comm->rdispls, MPI_UINT64_T,
MPI_COMM_WORLD);
free(comm->sendbuf_vert);
sum_recv += cur_recv;
}
q->queue_size = q->next_size + sum_recv;
q->next_size = 0;
q->send_size = 0;
/* Swap the double buffers: queue_next becomes the current frontier. */
uint64_t* temp = q->queue;
q->queue = q->queue_next;
q->queue_next = temp;
}
/* Exchange (vertex id, data) pairs staged in comm->sendbuf_vert/_data with
 * all ranks, splitting the transfer into rounds bounded by MAX_SEND_SIZE.
 * On return recvbuf_vert/recvbuf_data hold total_recv received pairs and
 * the send-side buffers have been freed. */
inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm,
queue_data_t* q)
{
for (int32_t i = 0; i < nprocs; ++i)
comm->recvcounts_temp[i] = 0;
/* sdispls_temp was advanced while packing; rewind to the region starts. */
for (int32_t i = 0; i < nprocs; ++i)
comm->sdispls_temp[i] -= comm->sendcounts_temp[i];
MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T,
comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD);
comm->total_recv = 0;
for (int i = 0; i < nprocs; ++i)
comm->total_recv += comm->recvcounts_temp[i];
comm->recvbuf_vert = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t));
/* BUG FIX: size with sizeof(int32_t) to match the buffer's element type
 * (was sizeof(uint32_t) — same width, but inconsistent). */
comm->recvbuf_data = (int32_t*)malloc(comm->total_recv*sizeof(int32_t));
/* BUG FIX: check the buffer just allocated (recvbuf_data), not
 * sendbuf_vert, which was allocated elsewhere. */
if (comm->recvbuf_vert == NULL || comm->recvbuf_data == NULL)
throw_err("exchange_vert_data() unable to allocate comm buffers", procid);
comm->global_queue_size = 0;
uint64_t task_queue_size = comm->total_send;
MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1,
MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1;
uint64_t sum_recv = 0;
uint64_t sum_send = 0;
for (uint64_t c = 0; c < num_comms; ++c)
{
/* Per-destination slice handled this round. */
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
comm->sendcounts[i] = (int32_t)(send_end - send_begin);
assert(comm->sendcounts[i] >= 0);
}
MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);
comm->sdispls[0] = 0;
comm->sdispls_cpy[0] = 0;
comm->rdispls[0] = 0;
for (int32_t i = 1; i < nprocs; ++i)
{
comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
comm->sdispls_cpy[i] = comm->sdispls[i];
}
int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t));
/* CONSISTENCY FIX: size cast matches buf_v's (was (int32_t)(cur_send)). */
int32_t* buf_d = (int32_t*)malloc((uint64_t)(cur_send)*sizeof(int32_t));
if (buf_v == NULL || buf_d == NULL)
throw_err("exchange_verts(), unable to allocate comm buffers", procid);
/* Pack this round's (vertex, data) pairs bucketed by destination. */
for (int32_t i = 0; i < nprocs; ++i)
{
uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
if (c == (num_comms-1))
send_end = comm->sendcounts_temp[i];
for (uint64_t j = send_begin; j < send_end; ++j)
{
uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i]+j];
int32_t data = comm->sendbuf_data[comm->sdispls_temp[i]+j];
buf_v[comm->sdispls_cpy[i]] = vert;
buf_d[comm->sdispls_cpy[i]++] = data;
}
}
MPI_Alltoallv(buf_v, comm->sendcounts,
comm->sdispls, MPI_UINT64_T,
comm->recvbuf_vert+sum_recv, comm->recvcounts,
comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD);
MPI_Alltoallv(buf_d, comm->sendcounts,
comm->sdispls, MPI_INT32_T,
comm->recvbuf_data+sum_recv, comm->recvcounts,
comm->rdispls, MPI_INT32_T, MPI_COMM_WORLD);
free(buf_v);
free(buf_d);
sum_recv += cur_recv;
sum_send += cur_send;
}
free(comm->sendbuf_data);
free(comm->sendbuf_vert);
assert(sum_recv == comm->total_recv);
assert(sum_send == comm->total_send);
comm->global_queue_size = 0;
task_queue_size = comm->total_recv + q->next_size;
MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1,
MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
q->next_size = 0;
q->send_size = 0;
}
/* Count, per destination rank, how many messages vert_index will generate:
 * one per rank that owns at least one ghost neighbor. v_to_rank dedupes
 * multiple neighbors owned by the same rank. */
inline void update_sendcounts_thread(dist_graph_t* g,
thread_comm_t* tc,
uint64_t vert_index)
{
for (int32_t i = 0; i < nprocs; ++i)
tc->v_to_rank[i] = false;
uint64_t out_degree = out_degree(g, vert_index);
uint64_t* outs = out_vertices(g, vert_index);
for (uint64_t j = 0; j < out_degree; ++j)
{
uint64_t out_index = outs[j];
/* Indices >= n_local are ghosts owned by another rank. */
if (out_index >= g->n_local)
{
int32_t out_rank = g->ghost_tasks[out_index-g->n_local];
if (!tc->v_to_rank[out_rank])
{
tc->v_to_rank[out_rank] = true;
++tc->sendcounts_thread[out_rank];
}
}
}
}
/* Stage (global id of vert_index, data) for every rank that owns a ghost
 * neighbor of vert_index — at most once per rank, deduped via v_to_rank. */
inline void update_vid_data_queues(dist_graph_t* g,
thread_comm_t* tc, mpi_data_t* comm,
uint64_t vert_index, int32_t data)
{
for (int32_t i = 0; i < nprocs; ++i)
tc->v_to_rank[i] = false;
uint64_t out_degree = out_degree(g, vert_index);
uint64_t* outs = out_vertices(g, vert_index);
for (uint64_t j = 0; j < out_degree; ++j)
{
uint64_t out_index = outs[j];
/* Indices >= n_local are ghosts owned by another rank. */
if (out_index >= g->n_local)
{
int32_t out_rank = g->ghost_tasks[out_index - g->n_local];
if (!tc->v_to_rank[out_rank])
{
tc->v_to_rank[out_rank] = true;
add_vid_data_to_send(tc, comm,
g->local_unmap[vert_index], data, out_rank);
}
}
}
}
/* Append vertex_id to this thread's local queue buffer; when the buffer is
 * full, reserve a contiguous region of the shared next-frontier queue with
 * one atomic capture and flush the whole buffer into it. */
inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q,
uint64_t vertex_id)
{
tq->thread_queue[tq->thread_queue_size++] = vertex_id;
if (tq->thread_queue_size == THREAD_QUEUE_SIZE)
{
uint64_t start_offset;
/* Atomically reserve THREAD_QUEUE_SIZE slots; capture yields the new
 * end of the region, so subtract to get the region start. */
#pragma omp atomic capture
start_offset = q->next_size += THREAD_QUEUE_SIZE;
start_offset -= THREAD_QUEUE_SIZE;
for (uint64_t i = 0; i < THREAD_QUEUE_SIZE; ++i)
q->queue_next[start_offset + i] = tq->thread_queue[i];
tq->thread_queue_size = 0;
}
}
/* Flush the (possibly partial) thread-local queue buffer into the shared
 * next-frontier queue; used at end of a parallel phase. */
inline void empty_queue(thread_queue_t* tq, queue_data_t* q)
{
uint64_t start_offset;
/* Capture yields the new end of the reserved region; rewind to its start. */
#pragma omp atomic capture
start_offset = q->next_size += tq->thread_queue_size;
start_offset -= tq->thread_queue_size;
for (uint64_t i = 0; i < tq->thread_queue_size; ++i)
q->queue_next[start_offset + i] = tq->thread_queue[i];
tq->thread_queue_size = 0;
}
/* Append vertex_id to this thread's local send buffer; when full, reserve a
 * region of the shared send queue atomically and flush (thread_send_size
 * equals THREAD_QUEUE_SIZE inside the flush branch). */
inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q,
uint64_t vertex_id)
{
tq->thread_send[tq->thread_send_size++] = vertex_id;
if (tq->thread_send_size == THREAD_QUEUE_SIZE)
{
uint64_t start_offset;
/* Capture yields the new end of the reserved region; rewind to start. */
#pragma omp atomic capture
start_offset = q->send_size += tq->thread_send_size;
start_offset -= tq->thread_send_size;
for (uint64_t i = 0; i < THREAD_QUEUE_SIZE; ++i)
q->queue_send[start_offset + i] = tq->thread_send[i];
tq->thread_send_size = 0;
}
}
/* Flush the (possibly partial) thread-local send buffer into the shared
 * send queue; used at end of a parallel phase. */
inline void empty_send(thread_queue_t* tq, queue_data_t* q)
{
uint64_t start_offset;
/* Capture yields the new end of the reserved region; rewind to its start. */
#pragma omp atomic capture
start_offset = q->send_size += tq->thread_send_size;
start_offset -= tq->thread_send_size;
for (uint64_t i = 0; i < tq->thread_send_size; ++i)
q->queue_send[start_offset + i] = tq->thread_send[i];
tq->thread_send_size = 0;
}
/* Stage one (vertex_id, data_val, send_rank) triple in the thread-local
 * buffers. When THREAD_QUEUE_SIZE triples have accumulated, atomically
 * reserve per-rank regions of the shared send buffers (via sdispls_temp
 * cursors) and scatter the staged triples into them. */
inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm,
uint64_t vertex_id, int32_t data_val, int32_t send_rank)
{
tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id;
tc->sendbuf_data_thread[tc->thread_queue_size] = data_val;
tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank;
++tc->thread_queue_size;
++tc->sendcounts_thread[send_rank];
if (tc->thread_queue_size == THREAD_QUEUE_SIZE)
{
/* Reserve a region per destination rank; capture yields the region end,
 * so subtract the per-rank count to get each region's start. */
for (int32_t i = 0; i < nprocs; ++i)
{
#pragma omp atomic capture
tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i];
tc->thread_starts[i] -= tc->sendcounts_thread[i];
}
/* Scatter staged triples into the reserved per-rank regions. */
for (uint64_t i = 0; i < tc->thread_queue_size; ++i)
{
int32_t cur_rank = tc->sendbuf_rank_thread[i];
comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
tc->sendbuf_vert_thread[i];
comm->sendbuf_data[tc->thread_starts[cur_rank]] =
tc->sendbuf_data_thread[i];
++tc->thread_starts[cur_rank];
}
for (int32_t i = 0; i < nprocs; ++i)
{
tc->thread_starts[i] = 0;
tc->sendcounts_thread[i] = 0;
}
tc->thread_queue_size = 0;
}
}
/* Flush the (possibly partial) thread-local triple buffers into the shared
 * send buffers; same reservation/scatter scheme as add_vid_data_to_send,
 * used at end of a parallel phase. */
inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm)
{
for (int32_t i = 0; i < nprocs; ++i)
{
/* Capture yields each region's end; rewind by the per-rank count. */
#pragma omp atomic capture
tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i];
tc->thread_starts[i] -= tc->sendcounts_thread[i];
}
for (uint64_t i = 0; i < tc->thread_queue_size; ++i)
{
int32_t cur_rank = tc->sendbuf_rank_thread[i];
comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
tc->sendbuf_vert_thread[i];
comm->sendbuf_data[tc->thread_starts[cur_rank]] =
tc->sendbuf_data_thread[i];
++tc->thread_starts[cur_rank];
}
for (int32_t i = 0; i < nprocs; ++i)
{
tc->thread_starts[i] = 0;
tc->sendcounts_thread[i] = 0;
}
tc->thread_queue_size = 0;
}
#endif
|
master_taskloop_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized
void xxx(int argc) {
  // Checks that -Wuninitialized still fires inside a master taskloop body.
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp master taskloop
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop'}}
#pragma omp master taskloop
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop'}}
#pragma omp master taskloop foo
void test_no_clause() {
  // The directive must be followed by a for loop; a plain statement errors.
  int i;
#pragma omp master taskloop
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{statement after '#pragma omp master taskloop' must be a for loop}}
#pragma omp master taskloop
  ++i;
}
void test_branch_protected_scope() {
  // Branches may not cross the OpenMP region boundary in either direction;
  // only jumps that stay within the region (L2 inside) are allowed.
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp parallel
#pragma omp master taskloop
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
void test_invalid_clause() {
  // Unknown tokens are ignored with a warning; duplicate 'nogroup' errors.
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop foo bar
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{directive '#pragma omp master taskloop' cannot contain more than one 'nogroup' clause}}
#pragma omp master taskloop nogroup nogroup
  for (i = 0; i < 16; ++i)
    ;
}
void test_non_identifiers() {
  // Trailing semicolons/commas and disallowed clauses produce the expected
  // "extra tokens" warnings (plus an error for 'linear', which this
  // directive does not accept).
  int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+3 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp master taskloop'}}
#pragma omp parallel
#pragma omp master taskloop linear(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop private(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
#pragma omp master taskloop, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Parser/Sema diagnostics for the 'collapse' clause on
// '#pragma omp master taskloop': malformed argument lists, non-constant
// and non-positive expressions, and mismatches between the requested
// collapse depth and the actual loop nest.
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
#pragma omp master taskloop collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp master taskloop collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp master taskloop collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// Parser diagnostics for the 'private' clause argument list, followed by
// well-formed uses with one, two, and three variables.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp master taskloop private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// Parser diagnostics for the 'lastprivate' clause argument list, followed
// by well-formed uses with one, two, and three variables.
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Parser diagnostics for the 'firstprivate' clause argument list, followed
// by well-formed combined lastprivate+firstprivate uses.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp master taskloop lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Sema diagnostics on the associated loop itself: float/double induction
// variables are rejected, and >64-bit integers are narrowed with a warning.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp master taskloop
  for (__int128 ii = 0; ii < 10; ii++) {
    c[ii] = a[ii] + b[ii];
  }
}
|
Par-15-ParConsecutiveNoWaitFor.c |
// NOTE(review): this program contains a deliberate data race — the file
// name ("ParConsecutiveNoWaitFor") suggests it is a race-detection test
// case, so the race must NOT be "fixed". Because the first worksharing
// loop carries 'nowait', a thread may start the second loop and read a[j]
// before the thread assigned iteration j has written it.
int main(int argc, char **argv) {
  int a[4] = {1,2,3,4};
  int b[4] = {0, 0, 0, 0};
#pragma omp parallel
  {
// no barrier after this loop: threads proceed immediately to the next loop
#pragma omp for nowait
    for (int i = 0; i < 4; ++i) {
      a[i] = 3*a[i];
    }
// reads a[j] — racy with the writes above when iterations are split
// across threads
#pragma omp for
    for (int j = 0; j < 4; ++j) {
      b[j] = a[j];
    }
  }
  return 0;
}
|
imginputfileconn.h | /**
* DeepDetect
* Copyright (c) 2014 Emmanuel Benazera
* Author: Emmanuel Benazera <beniz@droidnik.fr>
*
* This file is part of deepdetect.
*
* deepdetect is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* deepdetect is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with deepdetect. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef IMGINPUTFILECONN_H
#define IMGINPUTFILECONN_H
#include "inputconnectorstrategy.h"
#include <opencv2/opencv.hpp>
#ifdef USE_CUDA_CV
#include <opencv2/cudaimgproc.hpp>
#endif
#if CV_VERSION_MAJOR >= 3
#define CV_LOAD_IMAGE_COLOR cv::IMREAD_COLOR
#define CV_LOAD_IMAGE_GRAYSCALE cv::IMREAD_GRAYSCALE
#define CV_LOAD_IMAGE_UNCHANGED cv::IMREAD_UNCHANGED
#define CV_BGR2RGB cv::COLOR_BGR2RGB
#define CV_BGR2GRAY cv::COLOR_BGR2GRAY
#define CV_GRAY2RGB cv::COLOR_GRAY2RGB
#define CV_YCrCb2RGB cv::COLOR_YCrCb2RGB
#define CV_YCrCb2BGR cv::COLOR_YCrCb2BGR
#define CV_BGR2YCrCb cv::COLOR_BGR2YCrCb
#define CV_INTER_CUBIC cv::INTER_CUBIC
#endif
#include "ext/base64/base64.h"
#include "utils/apitools.h"
#include <random>
namespace dd
{
class DDImg
{
public:
DDImg()
{
}
~DDImg()
{
}
// base64 detection
bool is_within_base64_range(char c) const
{
if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')
|| (c >= '0' && c <= '9') || (c == '+' || c == '/' || c == '='))
return true;
else
return false;
}
bool possibly_base64(const std::string &s) const
{
bool ism = is_multiple_four(s);
if (!ism)
return false;
for (char c : s)
{
bool within_64 = is_within_base64_range(c);
if (!within_64)
return false;
}
return true;
}
bool is_multiple_four(const std::string &s) const
{
if (s.length() % 4 == 0)
return true;
else
return false;
}
void resize(const cv::Mat &src, cv::Mat &dst, const cv::Size &cvsize,
const double &fx, const double &fy) const
{
#ifdef USE_CUDA_CV
if (_cuda)
{
cv::cuda::GpuMat d_src;
d_src.upload(src);
cv::cuda::GpuMat d_dst;
cv::cuda::resize(d_src, d_dst, cvsize, fx, fy, select_cv_interp());
if (_histogram_equalization)
{
if (_bw)
{
cv::cuda::equalizeHist(d_dst, d_dst);
if (_rgb)
cv::cuda::cvtColor(d_dst, d_dst, CV_GRAY2RGB);
}
else
{
// We don't apply equalizeHist on each BGR channels to keep
// the color balance of the image. equalizeHist(V) of HSV can
// works too, the result is almost the same
cv::cuda::cvtColor(d_dst, d_dst, CV_BGR2YCrCb);
std::vector<cv::cuda::GpuMat> vec_channels;
cv::cuda::split(d_dst, vec_channels);
cv::cuda::equalizeHist(vec_channels[0], vec_channels[0]);
cv::cuda::merge(vec_channels, d_dst);
if (_rgb)
cv::cuda::cvtColor(d_dst, d_dst, CV_YCrCb2RGB);
else
cv::cuda::cvtColor(d_dst, d_dst, CV_YCrCb2BGR);
}
}
else if (_rgb)
{
if (_bw)
cv::cuda::cvtColor(d_dst, d_dst, CV_GRAY2RGB);
else:
cv::cuda::cvtColor(d_dst, d_dst, CV_BGR2RGB);
}
d_dst.download(dst);
}
else
#endif
{
cv::resize(src, dst, cvsize, fx, fy, select_cv_interp());
if (_histogram_equalization)
{
if (_bw)
{
cv::equalizeHist(dst, dst);
if (_rgb)
cv::cvtColor(dst, dst, CV_GRAY2RGB);
}
else
{
// We don't apply equalizeHist on each BGR channels to keep
// the color balance of the image. equalizeHist(V) of HSV can
// works too, the result is almost the same
cv::cvtColor(dst, dst, CV_BGR2YCrCb);
std::vector<cv::Mat> vec_channels;
cv::split(dst, vec_channels);
cv::equalizeHist(vec_channels[0], vec_channels[0]);
cv::merge(vec_channels, dst);
if (_rgb)
cv::cvtColor(dst, dst, CV_YCrCb2RGB);
else
cv::cvtColor(dst, dst, CV_YCrCb2BGR);
}
}
else if (_rgb)
{
if (_bw)
cv::cvtColor(dst, dst, CV_GRAY2RGB);
else
cv::cvtColor(dst, dst, CV_BGR2RGB);
}
}
}
void scale(const cv::Mat &src, cv::Mat &dst) const
{
float coef = std::min(
static_cast<float>(_scale_max) / std::max(src.rows, src.cols),
static_cast<float>(_scale_min) / std::min(src.rows, src.cols));
resize(src, dst, cv::Size(), coef, coef);
}
/// Apply preprocessing to image and add it to the list of images
/// img_name: name of the image as displayed in error messages
    /// Records the original image (if requested), resizes/scales it
    /// according to the connector configuration, optionally center-crops,
    /// and appends the result to _imgs.
    /// @param img decoded input image (may be empty on decode failure)
    /// @param img_name name used in error messages
    /// @return 0 on success, -1 when `img` is empty
    /// @throws InputConnectorBadParamException on resize or crop failure
    int add_image(const cv::Mat &img, const std::string &img_name)
    {
      if (_keep_orig)
        _orig_imgs.push_back(img);
      if (img.empty())
        {
          _logger->error("empty image {}", img_name);
          return -1;
        }
      // remember the pre-resize dimensions (rows, cols)
      _imgs_size.push_back(std::pair<int, int>(img.rows, img.cols));
      cv::Mat rimg;
      try
        {
          if (_scaled)
            scale(img, rimg);
          else if (_width == 0 || _height == 0)
            {
              if (_width == 0 && _height == 0)
                {
                  // Do nothing and keep native resolution. May cause issues if
                  // batched images are different resolutions
                  rimg = img;
                }
              else
                {
                  // Resize so that the larger dimension is set to whichever
                  // (width or height) is non-zero, maintaining aspect ratio
                  // XXX - This may cause issues if batch images are different
                  // resolutions
                  size_t currMaxDim = std::max(img.rows, img.cols);
                  double scale = static_cast<double>(std::max(_width, _height))
                                 / static_cast<double>(currMaxDim);
                  resize(img, rimg, cv::Size(), scale, scale);
                }
            }
          else
            {
              // Resize normally to the specified width and height
              resize(img, rimg, cv::Size(_width, _height), 0, 0);
            }
        }
      catch (...)
        {
          throw InputConnectorBadParamException("failed resizing image "
                                                + img_name);
        }
      if (_crop_width != 0 && _crop_height != 0)
        {
          // NOTE(review): borders are computed from the configured
          // _width/_height, not from rimg's actual dimensions — this
          // assumes cropping is only combined with the fixed-size resize
          // path above; with _scaled or native resolution the Rect may be
          // out of range (caught below). Confirm intended usage.
          int widthBorder = (_width - _crop_width) / 2;
          int heightBorder = (_height - _crop_height) / 2;
          try
            {
              rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width,
                                   _crop_height));
            }
          catch (...)
            {
              throw InputConnectorBadParamException("failed cropping image "
                                                    + img_name);
            }
        }
      _imgs.push_back(std::move(rimg));
      return 0;
    }
// decode image
void decode(const std::string &str)
{
std::vector<unsigned char> vdat(str.begin(), str.end());
cv::Mat img = cv::Mat(cv::imdecode(
cv::Mat(vdat, false),
_unchanged_data
? CV_LOAD_IMAGE_UNCHANGED
: (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)));
add_image(img, "base64 image");
}
// deserialize image, independent of format
void deserialize(std::stringstream &input)
{
size_t size = 0;
input.seekg(0, input.end);
size = input.tellg();
input.seekg(0, input.beg);
char *data = new char[size];
input.read(data, size);
std::string str(data, data + size);
delete[] data;
decode(str);
}
// data acquisition
int read_file(const std::string &fname)
{
cv::Mat img
= cv::imread(fname, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED
: (_bw ? CV_LOAD_IMAGE_GRAYSCALE
: CV_LOAD_IMAGE_COLOR));
return add_image(img, fname);
}
    /// Records a database filename for backends that read images straight
    /// from a db; no image is decoded here.
    /// @return always 0
    int read_db(const std::string &fname)
    {
      _db_fname = fname;
      return 0;
    }
int read_mem(const std::string &content)
{
_in_mem = true;
cv::Mat timg;
_b64 = possibly_base64(content);
if (_b64)
{
std::string ccontent;
Base64::Decode(content, &ccontent);
std::stringstream sstr;
sstr << ccontent;
deserialize(sstr);
}
else
{
decode(content);
}
if (_imgs.at(0).empty())
return -1;
return 0;
}
    /// Walks a data directory: each immediate sub-directory is treated as
    /// a class (label = enumeration order) and its files as samples; a
    /// flat directory yields unlabeled files (label -1). Every file is
    /// decoded and appended via add_image().
    /// @return always 0 (per-image failures are handled by add_image)
    /// @throws InputConnectorBadParamException when a directory listing fails
    int read_dir(const std::string &dir)
    {
      // list directories in dir
      std::unordered_set<std::string> subdirs;
      if (fileops::list_directory(dir, false, true, false, subdirs))
        throw InputConnectorBadParamException(
            "failed reading text subdirectories in data directory " + dir);
      _logger->info("imginputfileconn: list subdirs size={}", subdirs.size());
      // list files and classes
      std::vector<std::pair<std::string, int>> lfiles; // labeled files
      std::unordered_map<int, std::string>
          hcorresp; // correspondence class number / class name
      if (!subdirs.empty())
        {
          // labeled layout: one sub-directory per class
          int cl = 0;
          auto uit = subdirs.begin();
          while (uit != subdirs.end())
            {
              std::unordered_set<std::string> subdir_files;
              if (fileops::list_directory((*uit), true, false, true,
                                          subdir_files))
                throw InputConnectorBadParamException(
                    "failed reading image data sub-directory " + (*uit));
              auto fit = subdir_files.begin();
              while (fit != subdir_files.end()) // XXX: re-iterating the file
                                                // is not optimal
                {
                  lfiles.push_back(std::pair<std::string, int>((*fit), cl));
                  ++fit;
                }
              ++cl;
              ++uit;
            }
        }
      else
        {
          // flat layout: unlabeled files (e.g. test/predict data)
          std::unordered_set<std::string> test_files;
          fileops::list_directory(dir, true, false, false, test_files);
          auto fit = test_files.begin();
          while (fit != test_files.end())
            {
              lfiles.push_back(
                  std::pair<std::string, int>((*fit), -1)); // -1 for no class
              ++fit;
            }
        }
      // read images
      _imgs.reserve(lfiles.size());
      _img_files.reserve(lfiles.size());
      _labels.reserve(lfiles.size());
      for (std::pair<std::string, int> &p : lfiles)
        {
          cv::Mat img = cv::imread(
              p.first, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED
                                       : (_bw ? CV_LOAD_IMAGE_GRAYSCALE
                                              : CV_LOAD_IMAGE_COLOR));
          add_image(img, p.first);
          _img_files.push_back(p.first);
          if (p.second >= 0)
            _labels.push_back(p.second);
          // periodic progress logging for large datasets
          if (_imgs.size() % 1000 == 0)
            _logger->info("read {} images", _imgs.size());
        }
      return 0;
    }
int select_cv_interp() const
{
if (_interp == "nearest")
return cv::INTER_NEAREST;
else if (_interp == "linear")
return cv::INTER_LINEAR;
else if (_interp == "area")
return cv::INTER_AREA;
else if (_interp == "lanczos4")
return cv::INTER_LANCZOS4;
else /* if (_interp == "cubic") */
return cv::INTER_CUBIC; // default
}
std::vector<cv::Mat> _imgs;
std::vector<cv::Mat> _orig_imgs;
std::vector<std::string> _img_files;
std::vector<std::pair<int, int>> _imgs_size;
bool _bw = false;
bool _rgb = false;
bool _histogram_equalization = false;
bool _in_mem = false;
bool _unchanged_data = false;
std::vector<int> _labels;
int _width = 224;
int _height = 224;
int _crop_width = 0;
int _crop_height = 0;
float _scale = 1.0;
bool _scaled = false;
int _scale_min = 600;
int _scale_max = 1000;
bool _keep_orig = false;
bool _b64 = false;
std::string _interp = "cubic";
#ifdef USE_CUDA_CV
bool _cuda = false;
#endif
std::string _db_fname;
std::shared_ptr<spdlog::logger> _logger;
};
class ImgInputFileConn : public InputConnectorStrategy
{
public:
ImgInputFileConn() : InputConnectorStrategy()
{
}
ImgInputFileConn(const ImgInputFileConn &i)
: InputConnectorStrategy(i), _width(i._width), _height(i._height),
_crop_width(i._crop_width), _crop_height(i._crop_height), _bw(i._bw),
_rgb(i._rgb), _unchanged_data(i._unchanged_data),
_test_split(i._test_split), _mean(i._mean),
_has_mean_scalar(i._has_mean_scalar), _scale(i._scale),
_scaled(i._scaled), _scale_min(i._scale_min),
_scale_max(i._scale_max), _keep_orig(i._keep_orig),
_interp(i._interp)
#ifdef USE_CUDA_CV
,
_cuda(i._cuda)
#endif
{
}
~ImgInputFileConn()
{
}
    /// Connector initialization entry point: forwards the API data to
    /// fillup_parameters().
    void init(const APIData &ad)
    {
      fillup_parameters(ad);
    }
    /// Reads the optional image-connector parameters out of `ad` and
    /// overrides the current settings; keys absent from `ad` keep their
    /// previous values.
    /// @throws InputConnectorBadParamException when crop dimensions exceed
    ///         the configured width/height
    void fillup_parameters(const APIData &ad)
    {
      // optional parameters.
      if (ad.has("width"))
        _width = ad.get("width").get<int>();
      if (ad.has("height"))
        _height = ad.get("height").get<int>();
      if (ad.has("crop_width"))
        {
          _crop_width = ad.get("crop_width").get<int>();
          if (_crop_width > _width)
            {
              _logger->error("Crop width must be less than or equal to width");
              throw InputConnectorBadParamException(
                  "Crop width must be less than or equal to width");
            }
        }
      if (ad.has("crop_height"))
        {
          _crop_height = ad.get("crop_height").get<int>();
          if (_crop_height > _height)
            {
              _logger->error(
                  "Crop height must be less than or equal to height");
              throw InputConnectorBadParamException(
                  "Crop height must be less than or equal to height");
            }
        }
      if (ad.has("bw"))
        _bw = ad.get("bw").get<bool>();
      if (ad.has("rgb"))
        _rgb = ad.get("rgb").get<bool>();
      if (ad.has("histogram_equalization"))
        _histogram_equalization = ad.get("histogram_equalization").get<bool>();
      if (ad.has("unchanged_data"))
        _unchanged_data = ad.get("unchanged_data").get<bool>();
      if (ad.has("shuffle"))
        _shuffle = ad.get("shuffle").get<bool>();
      if (ad.has("seed"))
        _seed = ad.get("seed").get<int>();
      if (ad.has("test_split"))
        _test_split = ad.get("test_split").get<double>();
      if (ad.has("mean"))
        {
          apitools::get_floats(ad, "mean", _mean);
          _has_mean_scalar = true;
        }
      if (ad.has("std"))
        {
          apitools::get_floats(ad, "std", _std);
        }
      // Variable size
      if (ad.has("scale"))
        _scale = ad.get("scale").get<double>();
      // NOTE(review): mere presence of any of these keys enables scaling —
      // passing "scaled": false still sets _scaled = true. Confirm this is
      // the intended API semantics.
      if (ad.has("scaled") || ad.has("scale_min") || ad.has("scale_max"))
        _scaled = true;
      if (ad.has("scale_min"))
        _scale_min = ad.get("scale_min").get<int>();
      if (ad.has("scale_max"))
        _scale_max = ad.get("scale_max").get<int>();
      // whether to keep original image (for chained ops, e.g. cropping)
      if (ad.has("keep_orig"))
        _keep_orig = ad.get("keep_orig").get<bool>();
      // image interpolation method
      if (ad.has("interp"))
        _interp = ad.get("interp").get<std::string>();
      // timeout
      this->set_timeout(ad);
#ifdef USE_CUDA_CV
      // image resizing on GPU
      if (ad.has("cuda"))
        _cuda = ad.get("cuda").get<bool>();
#endif
    }
    /// Propagates the connector's image-processing configuration (and
    /// logger) into a per-request DDImg decoder instance.
    void copy_parameters_to(DDImg &dimg) const
    {
      dimg._bw = _bw;
      dimg._rgb = _rgb;
      dimg._histogram_equalization = _histogram_equalization;
      dimg._unchanged_data = _unchanged_data;
      dimg._width = _width;
      dimg._height = _height;
      dimg._crop_width = _crop_width;
      dimg._crop_height = _crop_height;
      dimg._scale = _scale;
      dimg._scaled = _scaled;
      dimg._scale_min = _scale_min;
      dimg._scale_max = _scale_max;
      dimg._keep_orig = _keep_orig;
      dimg._interp = _interp;
#ifdef USE_CUDA_CV
      dimg._cuda = _cuda;
#endif
      dimg._logger = _logger;
    }
int feature_size() const
{
if (_bw || _unchanged_data)
{
// XXX: only valid for single channels
if (_crop_width != 0 && _crop_height != 0)
return _crop_width * _crop_height;
else
return _width * _height;
}
else
{
// RGB
if (_crop_width != 0 && _crop_height != 0)
return _crop_width * _crop_height * 3;
else
return _width * _height * 3;
}
}
    /// Number of images currently loaded for training/prediction.
    int batch_size() const
    {
      return _images.size();
    }
    /// Number of images held out for testing (after test_split).
    int test_batch_size() const
    {
      return _test_images.size();
    }
    /// Pulls input data from `ad`. When raw cv::Mat images are supplied
    /// ("data_raw_img"), they are resized/converted in place and ids/uris
    /// are aligned with them; otherwise falls back to the generic
    /// InputConnectorStrategy::get_data() path (files, URLs, base64, ...).
    void get_data(const APIData &ad)
    {
      // check for raw cv::Mat
      if (ad.has("data_raw_img"))
        {
          if (ad.has("ids"))
            _ids = ad.get("ids").get<std::vector<std::string>>();
          if (ad.has("meta_uris"))
            _meta_uris = ad.get("meta_uris").get<std::vector<std::string>>();
          if (ad.has("index_uris"))
            _index_uris = ad.get("index_uris").get<std::vector<std::string>>();
          _images = ad.get("data_raw_img").get<std::vector<cv::Mat>>();
          std::vector<cv::Mat> rimgs;
          std::vector<std::string> uris;
          int i = 0;
          // note: cv::Mat copies are shallow (ref-counted headers), so the
          // by-value loop below does not duplicate pixel data
          for (auto img : _images)
            {
              cv::Mat rimg;
              resize(img, rimg, cv::Size(_width, _height), 0, 0);
              if (_bw && rimg.channels() > 1)
                {
                  cv::Mat bwimg;
                  cv::cvtColor(rimg, bwimg, CV_BGR2GRAY);
                  rimg = bwimg;
                }
              _images_size.push_back(std::pair<int, int>(img.rows, img.cols));
              if (_keep_orig)
                _orig_images.push_back(std::move(img));
              if (!_ids.empty())
                uris.push_back(_ids.at(i));
              else
                {
                  // no caller-supplied ids: use the image index as id/uri
                  _ids.push_back(std::to_string(i));
                  uris.push_back(_ids.back());
                }
              rimgs.push_back(std::move(rimg));
              ++i;
            }
          _images = rimgs;
          if (!uris.empty())
            _uris = uris;
        }
      else
        InputConnectorStrategy::get_data(ad);
    }
    /// Main input pipeline: applies parameter overrides, acquires data,
    /// reads/decodes every URI in parallel (OpenMP), then optionally
    /// shuffles and splits into train/test sets.
    /// @throws InputConnectorBadParamException when any URI failed to read
    ///         or when no image at all could be decoded
    void transform(const APIData &ad)
    {
      if (ad.has(
              "parameters")) // hotplug of parameters, overriding the defaults
        {
          APIData ad_param = ad.getobj("parameters");
          if (ad_param.has("input"))
            {
              fillup_parameters(ad_param.getobj("input"));
            }
        }
      get_data(ad);
      if (!_images.empty()) // got ready raw images
        {
          return;
        }
      int catch_read = 0;
      std::string catch_msg;
      std::vector<std::string> uris;
      std::vector<std::string> meta_uris;
      std::vector<std::string> index_uris;
      std::vector<std::string> failed_uris;
// each iteration decodes one URI; shared containers are only mutated
// inside the critical sections below
#pragma omp parallel for
      for (size_t i = 0; i < _uris.size(); i++)
        {
          bool no_img = false;
          std::string u = _uris.at(i);
          DataEl<DDImg> dimg(this->_input_timeout);
          copy_parameters_to(dimg._ctype);
          try
            {
              if (dimg.read_element(u, this->_logger))
                {
                  _logger->error("no data for image {}", u);
                  no_img = true;
                }
              // NOTE(review): _db_fname is written outside any critical
              // section — benign if every element yields the same db name,
              // but technically a data race; confirm.
              if (!dimg._ctype._db_fname.empty())
                _db_fname = dimg._ctype._db_fname;
            }
          catch (std::exception &e)
            {
#pragma omp critical
              {
                ++catch_read;
                catch_msg = e.what();
                failed_uris.push_back(u);
                no_img = true;
              }
            }
          if (no_img)
            continue;
          if (!_db_fname.empty())
            continue;
// merge this element's decoded images/metadata into the shared vectors
#pragma omp critical
          {
            _images.insert(_images.end(),
                           std::make_move_iterator(dimg._ctype._imgs.begin()),
                           std::make_move_iterator(dimg._ctype._imgs.end()));
            if (_keep_orig)
              _orig_images.insert(
                  _orig_images.end(),
                  std::make_move_iterator(dimg._ctype._orig_imgs.begin()),
                  std::make_move_iterator(dimg._ctype._orig_imgs.end()));
            _images_size.insert(
                _images_size.end(),
                std::make_move_iterator(dimg._ctype._imgs_size.begin()),
                std::make_move_iterator(dimg._ctype._imgs_size.end()));
            if (!dimg._ctype._labels.empty())
              _test_labels.insert(
                  _test_labels.end(),
                  std::make_move_iterator(dimg._ctype._labels.begin()),
                  std::make_move_iterator(dimg._ctype._labels.end()));
            if (!_ids.empty())
              uris.push_back(_ids.at(i));
            else if (!dimg._ctype._b64 && dimg._ctype._imgs.size() == 1)
              uris.push_back(u);
            else if (!dimg._ctype._img_files.empty())
              uris.insert(
                  uris.end(),
                  std::make_move_iterator(dimg._ctype._img_files.begin()),
                  std::make_move_iterator(dimg._ctype._img_files.end()));
            else
              uris.push_back(std::to_string(i));
            if (!_meta_uris.empty())
              meta_uris.push_back(_meta_uris.at(i));
            if (!_index_uris.empty())
              index_uris.push_back(_index_uris.at(i));
          }
        }
      if (catch_read)
        {
          for (auto s : failed_uris)
            _logger->error("failed reading image {}", s);
          throw InputConnectorBadParamException(catch_msg);
        }
      _uris = uris;
      _ids = _uris; // since uris may be in different order than before
                    // transform
      _meta_uris = meta_uris;
      _index_uris = index_uris;
      if (!_db_fname.empty())
        return; // db filename is passed to backend
      // shuffle before possible split
      if (_shuffle)
        {
          std::mt19937 g;
          if (_seed >= 0)
            g = std::mt19937(_seed);
          else
            {
              std::random_device rd;
              g = std::mt19937(rd());
            }
          std::shuffle(_images.begin(), _images.end(),
                       g); // XXX beware: labels are not shuffled, i.e. let's
                           // not shuffle while testing
        }
      // split as required
      if (_test_split > 0)
        {
          int split_size = std::floor(_images.size() * (1.0 - _test_split));
          auto chit = _images.begin();
          auto dchit = chit;
          int cpos = 0;
          while (chit != _images.end())
            {
              if (cpos == split_size)
                {
                  // remember where the test partition starts so the train
                  // vector can be truncated afterwards
                  if (dchit == _images.begin())
                    dchit = chit;
                  _test_images.push_back((*chit));
                }
              else
                ++cpos;
              ++chit;
            }
          _images.erase(dchit, _images.end());
          _logger->info("data split test size={} / remaining data size={}",
                        _test_images.size(), _images.size());
        }
      if (_images.empty())
        throw InputConnectorBadParamException("no image could be found");
    }
// data
std::vector<cv::Mat> _images;
std::vector<cv::Mat> _orig_images; /**< stored upon request. */
std::vector<cv::Mat> _test_images;
std::vector<int> _test_labels;
std::vector<std::pair<int, int>> _images_size;
// image parameters
int _width = 224;
int _height = 224;
int _crop_width = 0;
int _crop_height = 0;
bool _bw = false; /**< whether to convert to black & white. */
bool _rgb = false; /**< whether to convert to rgb. */
bool _histogram_equalization
= false; /**< whether to apply histogram equalizer. */
bool _unchanged_data = false; /**< IMREAD_UNCHANGED flag. */
double _test_split = 0.0; /**< auto-split of the dataset. */
int _seed = -1; /**< shuffling seed. */
std::vector<float>
_mean; /**< mean image pixels, to be subtracted from images. */
std::vector<float> _std; /**< std, to divide image values. */
bool _has_mean_scalar = false; /**< whether scalar is set. */
std::string _db_fname;
double _scale = 1.0;
bool _scaled = false;
int _scale_min = 600;
int _scale_max = 1000;
bool _keep_orig = false;
std::string _interp = "cubic";
#ifdef USE_CUDA_CV
bool _cuda = false;
#endif
};
}
#ifdef USE_CAFFE
#include "caffeinputconns.h"
#endif
#ifdef USE_TF
#include "backends/tf/tfinputconns.h"
#endif
#ifdef USE_DLIB
#include "backends/dlib/dlibinputconns.h"
#endif
#ifdef USE_NCNN
#include "backends/ncnn/ncnninputconns.h"
#endif
#ifdef USE_CAFFE2
#include "backends/caffe2/caffe2inputconns.h"
#endif
#ifdef USE_TENSORRT
#include "backends/tensorrt/tensorrtinputconns.h"
#endif
#ifdef USE_TORCH
#include "backends/torch/torchinputconns.h"
#endif
#endif
|
veccopy-ompt-target-disallow-both.c | #include <stdio.h>
#include <omp.h>
#include "callbacks.h"
// OMPT callback test: copies b into a twice via offloaded loops and
// verifies the result; the trailing '/// CHECK:' lines are FileCheck
// expectations on the emitted callback trace.
// NOTE(review): a compound statement follows the combined
// 'target parallel for' / 'target teams distribute parallel for'
// directives — OpenMP requires the for-loop to immediately follow a
// worksharing-loop directive, so most compilers reject the braces;
// confirm against the original test before changing.
int main()
{
  int N = 100000;
  int a[N];
  int b[N];
  int i;
  for (i=0; i<N; i++)
    a[i]=0;
  for (i=0; i<N; i++)
    b[i]=i;
#pragma omp target parallel for
  {
    for (int j = 0; j< N; j++)
      a[j]=b[j];
  }
#pragma omp target teams distribute parallel for
  {
    for (int j = 0; j< N; j++)
      a[j]=b[j];
  }
  int rc = 0;
  for (i=0; i<N; i++)
    if (a[i] != b[i] ) {
      rc++;
      printf ("Wrong value: a[%d]=%d\n", i, a[i]);
    }
  if (!rc)
    printf("Success\n");
  return rc;
}
/// CHECK: Callback Init:
/// CHECK: Callback Load:
/// CHECK: Callback Target EMI: kind=1 endpoint=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=1
/// CHECK: Callback DataOp EMI: endpoint=2 optype=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=2
/// CHECK: Callback DataOp EMI: endpoint=2 optype=2
/// CHECK: Callback DataOp EMI: endpoint=1 optype=1
/// CHECK: Callback DataOp EMI: endpoint=2 optype=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=2
/// CHECK: Callback DataOp EMI: endpoint=2 optype=2
/// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=3
/// CHECK: Callback DataOp EMI: endpoint=2 optype=3
/// CHECK: Callback DataOp EMI: endpoint=1 optype=3
/// CHECK: Callback DataOp EMI: endpoint=2 optype=3
/// CHECK: Callback DataOp EMI: endpoint=1 optype=4
/// CHECK: Callback DataOp EMI: endpoint=2 optype=4
/// CHECK: Callback DataOp EMI: endpoint=1 optype=4
/// CHECK: Callback DataOp EMI: endpoint=2 optype=4
/// CHECK: Callback Target EMI: kind=1 endpoint=2
/// CHECK: Callback Target EMI: kind=1 endpoint=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=1
/// CHECK: Callback DataOp EMI: endpoint=2 optype=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=2
/// CHECK: Callback DataOp EMI: endpoint=2 optype=2
/// CHECK: Callback DataOp EMI: endpoint=1 optype=1
/// CHECK: Callback DataOp EMI: endpoint=2 optype=1
/// CHECK: Callback DataOp EMI: endpoint=1 optype=2
/// CHECK: Callback DataOp EMI: endpoint=2 optype=2
/// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=0
/// CHECK: Callback DataOp EMI: endpoint=1 optype=3
/// CHECK: Callback DataOp EMI: endpoint=2 optype=3
/// CHECK: Callback DataOp EMI: endpoint=1 optype=3
/// CHECK: Callback DataOp EMI: endpoint=2 optype=3
/// CHECK: Callback DataOp EMI: endpoint=1 optype=4
/// CHECK: Callback DataOp EMI: endpoint=2 optype=4
/// CHECK: Callback DataOp EMI: endpoint=1 optype=4
/// CHECK: Callback DataOp EMI: endpoint=2 optype=4
/// CHECK: Callback Target EMI: kind=1 endpoint=2
/// CHECK: Callback Fini:
|
CALPHADFreeEnergyFunctionsTernary.h | #ifndef included_CALPHADFreeEnergyFunctionsTernary
#define included_CALPHADFreeEnergyFunctionsTernary
#include "CALPHADSpeciesPhaseGibbsEnergy.h"
#include "InterpolationType.h"
#include "Phases.h"
#include "datatypes.h"
#include "functions.h"
#include <boost/property_tree/ptree.hpp>
#include <fstream>
#include <iostream>
#include <math.h>
namespace Thermo4PFM
{
// CALPHAD free-energy model for a ternary (A-B-C) alloy with two phases
// (liquid "L" and solid "A").  Mixing energies use Redlich-Kister expansions
// whose coefficients are linear in temperature (a + b*T), read from a
// boost::property_tree database.
class CALPHADFreeEnergyFunctionsTernary
{
public:
    // input_db: CALPHAD coefficients; newton_db: optional Newton-solver
    // settings (tolerance, damping, max iterations, verbosity);
    // the two interpolation types select the polynomials used to blend
    // phase energies and concentrations.
    CALPHADFreeEnergyFunctionsTernary(boost::property_tree::ptree& input_db,
        boost::optional<boost::property_tree::ptree&> newton_db,
        const EnergyInterpolationType energy_interp_func_type,
        const ConcInterpolationType conc_interp_func_type);

    // Releases the diagnostics filename buffer allocated by the constructor.
    ~CALPHADFreeEnergyFunctionsTernary() { delete[] fenergy_diag_filename_; };

    // Free energy of phase pi at (temperature, conc); conc points to the two
    // independent compositions.  gp selects the grand-potential form
    // (default false).
    double computeFreeEnergy(const double temperature, const double* const conc,
        const PhaseIndex pi, const bool gp = false);
    // First derivatives of the free energy w.r.t. the two compositions,
    // written into deriv.
    void computeDerivFreeEnergy(const double temperature,
        const double* const conc, const PhaseIndex pi, double* deriv);
    // Second-derivative (2x2) matrix w.r.t. compositions, written into d2fdc2.
    void computeSecondDerivativeFreeEnergy(const double temp,
        const double* const conc, const PhaseIndex pi, double* d2fdc2);

    // Solve for the equilibrium compositions at the given temperature;
    // ceq is used as initial guess and overwritten with the solution.
    // Returns true on convergence.
    bool computeCeqT(const double temperature, double* ceq,
        const int maxits = 20, const bool verbose = false);

    /// Compute compositions and phase fractions ate ends of tie line
    /// passing through nominal composition (c0,c1)
    bool computeTieLine(const double temperature, const double c0,
        const double c1, double* ceq, const int maxits = 20,
        const bool verbose = false);

    // Sanity-check the energy model over the temperature range [T0, T1].
    void preRunDiagnostics(const double T0 = 300., const double T1 = 3000.);

    // Partition the nominal composition into per-phase compositions x,
    // given the phase fraction(s) phi.  Returns a solver status code.
    int computePhaseConcentrations(const double temperature,
        const double* const conc, const double* const phi, double* x);

    // Tabulate energy vs. phase fraction and composition (diagnostics output).
    void energyVsPhiAndC(const double temperature, const double* const ceq,
        const bool found_ceq, const double phi_well_scale,
        const int npts_phi = 51,
        const int npts_c = 50); // number of compositions to use (>1)
    void printEnergyVsComposition(
        const double temperature, std::ostream& os, const int npts = 100);
    // Blended (phase-interpolated) chemical free energy at the given state.
    double fchem(const double* const phi, const double* const conc,
        const double temperature);
    void printEnergyVsPhiHeader(const double temperature, const int nphi,
        const int nc0, const int nc1, const double c0min, const double c0max,
        const double c1min, const double c1max, std::ostream& os) const;
    void printEnergyVsPhi(const double* const conc, const double temperature,
        const double phi_well_scale, const int npts, std::ostream& os);

private:
    EnergyInterpolationType energy_interp_func_type_;
    ConcInterpolationType conc_interp_func_type_;

    // Read Newton-solver controls (tol, alpha, maxits, verbosity) from the DB.
    void readNewtonparameters(boost::property_tree::ptree& newton_db);

    // Evaluate all T-dependent Redlich-Kister coefficients and the pure-species
    // energies (fA, fB, fC) for both phases at the given temperature.
    void computeTdependentParameters(const double temperature,
        CalphadDataType* L_AB_L, CalphadDataType* L_AC_L,
        CalphadDataType* L_BC_L, CalphadDataType* L_ABC_L,
        CalphadDataType* L_AB_S, CalphadDataType* L_AC_S,
        CalphadDataType* L_BC_S, CalphadDataType* L_ABC_S, CalphadDataType* fA,
        CalphadDataType* fB, CalphadDataType* fC);

    // Owned filename buffer for diagnostics output (freed in the destructor).
    char* fenergy_diag_filename_;

    // Newton-solver controls.
    double newton_tol_;
    double newton_alpha_;
    int newton_maxits_;
    bool newton_verbose_;

    // Single species energies in each phase
    // size 3 for species A, B, C
    CALPHADSpeciesPhaseGibbsEnergy g_species_phaseL_[3];
    CALPHADSpeciesPhaseGibbsEnergy g_species_phaseA_[3];

    // size 4 for L0, L1, L2, L3, with 2 coefficient for linear expansion in T
    // a+b*T
    CalphadDataType LmixABPhaseL_[4][2];
    CalphadDataType LmixABPhaseA_[4][2];

    CalphadDataType LmixACPhaseL_[4][2];
    CalphadDataType LmixACPhaseA_[4][2];

    CalphadDataType LmixBCPhaseL_[4][2];
    CalphadDataType LmixBCPhaseA_[4][2];

    // Ternary interaction terms: 3 coefficients (L0, L1, L2), each a+b*T.
    CalphadDataType LmixABCPhaseL_[3][2];
    CalphadDataType LmixABCPhaseA_[3][2];

    // Interpolation polynomial table, indexed by the interpolation-type enums.
    double (*fun_ptr_arr_[3])(const double){ linear_interp_func,
        pbg_interp_func, harmonic_interp_func };

    // Read the CALPHAD coefficient tables from the DB.
    void readParameters(boost::property_tree::ptree& calphad_db);

#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
    // energy of species "is" in phase L,A,B
    double getFenergyPhaseL(const short is, const double temperature)
    {
        return g_species_phaseL_[is].fenergy(temperature);
    }
    double getFenergyPhaseA(const short is, const double temperature)
    {
        return g_species_phaseA_[is].fenergy(temperature);
    }

    // Phase-dispatching accessors for the A-B binary interaction terms.
    // Each returns NAN for an unknown phase index.
    CalphadDataType lmix0ABPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix0ABPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix0ABPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix1ABPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix1ABPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix1ABPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix2ABPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix2ABPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix2ABPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix3ABPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix3ABPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix3ABPhaseA(temperature);
            default:
                return NAN;
        }
    }

    // A-B terms, liquid phase: Lk(T) = a + b*T.
    CalphadDataType lmix0ABPhaseL(const double temperature)
    {
        return LmixABPhaseL_[0][0] + LmixABPhaseL_[0][1] * temperature;
    }

    CalphadDataType lmix1ABPhaseL(const double temperature)
    {
        return LmixABPhaseL_[1][0] + LmixABPhaseL_[1][1] * temperature;
    }

    CalphadDataType lmix2ABPhaseL(const double temperature)
    {
        return LmixABPhaseL_[2][0] + LmixABPhaseL_[2][1] * temperature;
    }

    CalphadDataType lmix3ABPhaseL(const double temperature)
    {
        return LmixABPhaseL_[3][0] + LmixABPhaseL_[3][1] * temperature;
    }

    // A-B terms, solid phase.
    CalphadDataType lmix0ABPhaseA(const double temperature)
    {
        return LmixABPhaseA_[0][0] + LmixABPhaseA_[0][1] * temperature;
    }

    CalphadDataType lmix1ABPhaseA(const double temperature)
    {
        return LmixABPhaseA_[1][0] + LmixABPhaseA_[1][1] * temperature;
    }

    CalphadDataType lmix2ABPhaseA(const double temperature)
    {
        return LmixABPhaseA_[2][0] + LmixABPhaseA_[2][1] * temperature;
    }

    CalphadDataType lmix3ABPhaseA(const double temperature)
    {
        return LmixABPhaseA_[3][0] + LmixABPhaseA_[3][1] * temperature;
    }

    // Phase-dispatching accessors for the A-C binary interaction terms.
    CalphadDataType lmix0ACPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix0ACPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix0ACPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix1ACPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix1ACPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix1ACPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix2ACPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix2ACPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix2ACPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix3ACPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix3ACPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix3ACPhaseA(temperature);
            default:
                return NAN;
        }
    }

    // A-C terms, liquid phase.
    CalphadDataType lmix0ACPhaseL(const double temperature)
    {
        return LmixACPhaseL_[0][0] + LmixACPhaseL_[0][1] * temperature;
    }

    CalphadDataType lmix1ACPhaseL(const double temperature)
    {
        return LmixACPhaseL_[1][0] + LmixACPhaseL_[1][1] * temperature;
    }

    CalphadDataType lmix2ACPhaseL(const double temperature)
    {
        return LmixACPhaseL_[2][0] + LmixACPhaseL_[2][1] * temperature;
    }

    CalphadDataType lmix3ACPhaseL(const double temperature)
    {
        return LmixACPhaseL_[3][0] + LmixACPhaseL_[3][1] * temperature;
    }

    // A-C terms, solid phase.
    CalphadDataType lmix0ACPhaseA(const double temperature)
    {
        return LmixACPhaseA_[0][0] + LmixACPhaseA_[0][1] * temperature;
    }

    CalphadDataType lmix1ACPhaseA(const double temperature)
    {
        return LmixACPhaseA_[1][0] + LmixACPhaseA_[1][1] * temperature;
    }

    CalphadDataType lmix2ACPhaseA(const double temperature)
    {
        return LmixACPhaseA_[2][0] + LmixACPhaseA_[2][1] * temperature;
    }

    CalphadDataType lmix3ACPhaseA(const double temperature)
    {
        return LmixACPhaseA_[3][0] + LmixACPhaseA_[3][1] * temperature;
    }

    // Phase-dispatching accessors for the B-C binary interaction terms.
    CalphadDataType lmix0BCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix0BCPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix0BCPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix1BCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix1BCPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix1BCPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix2BCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix2BCPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix2BCPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix3BCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix3BCPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix3BCPhaseA(temperature);
            default:
                return NAN;
        }
    }

    // B-C terms, liquid phase.
    CalphadDataType lmix0BCPhaseL(const double temperature)
    {
        return LmixBCPhaseL_[0][0] + LmixBCPhaseL_[0][1] * temperature;
    }

    CalphadDataType lmix1BCPhaseL(const double temperature)
    {
        return LmixBCPhaseL_[1][0] + LmixBCPhaseL_[1][1] * temperature;
    }

    CalphadDataType lmix2BCPhaseL(const double temperature)
    {
        return LmixBCPhaseL_[2][0] + LmixBCPhaseL_[2][1] * temperature;
    }

    CalphadDataType lmix3BCPhaseL(const double temperature)
    {
        return LmixBCPhaseL_[3][0] + LmixBCPhaseL_[3][1] * temperature;
    }

    // B-C terms, solid phase.
    CalphadDataType lmix0BCPhaseA(const double temperature)
    {
        return LmixBCPhaseA_[0][0] + LmixBCPhaseA_[0][1] * temperature;
    }

    CalphadDataType lmix1BCPhaseA(const double temperature)
    {
        return LmixBCPhaseA_[1][0] + LmixBCPhaseA_[1][1] * temperature;
    }

    CalphadDataType lmix2BCPhaseA(const double temperature)
    {
        return LmixBCPhaseA_[2][0] + LmixBCPhaseA_[2][1] * temperature;
    }

    CalphadDataType lmix3BCPhaseA(const double temperature)
    {
        return LmixBCPhaseA_[3][0] + LmixBCPhaseA_[3][1] * temperature;
    }

    // ABC
    CalphadDataType lmix0ABCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix0ABCPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix0ABCPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix1ABCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix1ABCPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix1ABCPhaseA(temperature);
            default:
                return NAN;
        }
    }

    CalphadDataType lmix2ABCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL:
                return lmix2ABCPhaseL(temperature);
            case PhaseIndex::phaseA:
                return lmix2ABCPhaseA(temperature);
            default:
                return NAN;
        }
    }

    // ABC liquid
    CalphadDataType lmix0ABCPhaseL(const double temperature)
    {
        return LmixABCPhaseL_[0][0] + LmixABCPhaseL_[0][1] * temperature;
    }

    CalphadDataType lmix1ABCPhaseL(const double temperature)
    {
        return LmixABCPhaseL_[1][0] + LmixABCPhaseL_[1][1] * temperature;
    }

    CalphadDataType lmix2ABCPhaseL(const double temperature)
    {
        return LmixABCPhaseL_[2][0] + LmixABCPhaseL_[2][1] * temperature;
    }

    // ABC solid
    CalphadDataType lmix0ABCPhaseA(const double temperature)
    {
        return LmixABCPhaseA_[0][0] + LmixABCPhaseA_[0][1] * temperature;
    }

    CalphadDataType lmix1ABCPhaseA(const double temperature)
    {
        return LmixABCPhaseA_[1][0] + LmixABCPhaseA_[1][1] * temperature;
    }

    CalphadDataType lmix2ABCPhaseA(const double temperature)
    {
        return LmixABCPhaseA_[2][0] + LmixABCPhaseA_[2][1] * temperature;
    }
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif

    // Per-phase free energies fl (liquid) and fa (solid) at the phase
    // compositions implied by (hphi, conc0, conc1).
    void computePhasesFreeEnergies(const double temperature,
        const double* const hphi, const double conc0, const double conc1,
        double& fl, double& fa);
};
void readLmixTernaryParameters(
boost::property_tree::ptree& Lmix_db, CalphadDataType LmixABC[3][2]);
}
#endif
|
HYPRE_IJMatrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* HYPRE_IJMatrix interface
*
*****************************************************************************/
#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixCreate
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixCreate( MPI_Comm        comm,
                      HYPRE_BigInt    ilower,
                      HYPRE_BigInt    iupper,
                      HYPRE_BigInt    jlower,
                      HYPRE_BigInt    jupper,
                      HYPRE_IJMatrix *matrix )
{
   HYPRE_BigInt info[2];
   HYPRE_Int num_procs;
   HYPRE_Int myid;

   hypre_IJMatrix *ijmatrix;

   HYPRE_BigInt row0, col0, rowN, colN;

   /* Allocate and zero-initialize the IJ matrix wrapper. */
   ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_IJMatrixComm(ijmatrix)         = comm;
   hypre_IJMatrixObject(ijmatrix)       = NULL;
   hypre_IJMatrixTranslator(ijmatrix)   = NULL;
   hypre_IJMatrixAssumedPart(ijmatrix)  = NULL;
   hypre_IJMatrixObjectType(ijmatrix)   = HYPRE_UNITIALIZED;
   hypre_IJMatrixAssembleFlag(ijmatrix) = 0;
   hypre_IJMatrixPrintLevel(ijmatrix)   = 0;
   hypre_IJMatrixOMPFlag(ijmatrix)      = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &myid);

   /* Validate the local row/column ranges (ilower == iupper + 1 denotes an
      empty local range).  On error, free the wrapper before returning. */
   if (ilower > iupper + 1 || ilower < 0)
   {
      hypre_error_in_arg(2);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (iupper < -1)
   {
      hypre_error_in_arg(3);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jlower > jupper + 1 || jlower < 0)
   {
      hypre_error_in_arg(4);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jupper < -1)
   {
      hypre_error_in_arg(5);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   /* Partitionings are stored as half-open ranges [lower, upper + 1). */
   hypre_IJMatrixRowPartitioning(ijmatrix)[0] = ilower;
   hypre_IJMatrixRowPartitioning(ijmatrix)[1] = iupper + 1;
   hypre_IJMatrixColPartitioning(ijmatrix)[0] = jlower;
   hypre_IJMatrixColPartitioning(ijmatrix)[1] = jupper + 1;

   /* now we need the global number of rows and columns as well
      as the global first row and column index */

   /* proc 0 has the first row and col */
   if (myid == 0)
   {
      info[0] = ilower;
      info[1] = jlower;
   }
   /* Collective: every rank must enter both broadcasts, in this order. */
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm);
   row0 = info[0];
   col0 = info[1];

   /* proc (num_procs-1) has the last row and col */
   if (myid == (num_procs - 1))
   {
      info[0] = iupper;
      info[1] = jupper;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   rowN = info[0];
   colN = info[1];

   /* Global extents derived from the first and last ranks' bounds. */
   hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0;
   hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0;
   hypre_IJMatrixGlobalNumRows(ijmatrix)  = rowN - row0 + 1;
   hypre_IJMatrixGlobalNumCols(ijmatrix)  = colN - col0 + 1;

   *matrix = (HYPRE_IJMatrix) ijmatrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Destroy an IJ matrix: release the assumed partition, the underlying
 * object (ParCSR only), and the wrapper itself.
 *
 * Fixes: the assumed-partition test was written `if hypre_IJMatrixAssumedPart(...)`
 * with no explicit parentheses (it only compiled because the accessor macro
 * happens to parenthesize its expansion), and the body was wrapped in a
 * redundant `if (ijmatrix)` that duplicated the NULL guard above it. */
HYPRE_Int
HYPRE_IJMatrixDestroy( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Release the assumed partition, if one was built. */
   if (hypre_IJMatrixAssumedPart(ijmatrix))
   {
      hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJMatrixAssumedPart(ijmatrix));
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixDestroyParCSR( ijmatrix );
   }
   else if ( hypre_IJMatrixObjectType(ijmatrix) != -1 )
   {
      /* Unknown object type: flag the error and do not free the wrapper,
         matching the original early-return behavior. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Initialize the underlying matrix object so coefficients can be set.
 * Only the ParCSR storage type is supported. */
HYPRE_Int
HYPRE_IJMatrixInitialize( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   /* A NULL handle is a caller error on argument 1. */
   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR( ij );

   return hypre_error_flag;
}
/* Same as HYPRE_IJMatrixInitialize, but the caller chooses the memory
 * location (host or device) for the matrix data. */
HYPRE_Int
HYPRE_IJMatrixInitialize_v2( HYPRE_IJMatrix matrix, HYPRE_MemoryLocation memory_location )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR_v2( ij, memory_location );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set the matrix's diagnostic print level.
 *
 * Fixes: the `print_level` argument was ignored and the level was
 * hard-coded to 1, so callers could neither raise verbosity nor turn
 * printing back off. */
HYPRE_Int
HYPRE_IJMatrixSetPrintLevel( HYPRE_IJMatrix matrix,
                             HYPRE_Int print_level )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixPrintLevel(ijmatrix) = print_level;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* This is a helper routine to compute a prefix sum of integer values.
*
* The current implementation is okay for modest numbers of threads.
*--------------------------------------------------------------------------*/
/* Exclusive prefix sum of `vals` into `sums` (sums[0] = 0,
 * sums[j] = vals[0] + ... + vals[j-1]).  The parallel path splits the
 * range into one interval per thread, scans each interval, stitches the
 * interval heads serially, then offsets the interval tails in parallel.
 *
 * Fixes: the serial branch used `sums[j] += ...`, which reads the
 * uninitialized contents of the output array; it must assign (`=`),
 * matching what the parallel branch computes. */
HYPRE_Int
hypre_PrefixSumInt(HYPRE_Int   nvals,
                   HYPRE_Int  *vals,
                   HYPRE_Int  *sums)
{
   HYPRE_Int j, nthreads, bsize;

   nthreads = hypre_NumThreads();
   bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the remainder */

   if (nvals < nthreads || bsize == 1)
   {
      /* Serial scan for small inputs. */
      sums[0] = 0;
      for (j = 1; j < nvals; j++)
      {
         sums[j] = sums[j - 1] + vals[j - 1];
      }
   }
   else
   {
      /* Compute preliminary partial sums (in parallel) within each interval */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < nvals; j += bsize)
      {
         HYPRE_Int  i, n = hypre_min((j + bsize), nvals);

         sums[j] = 0;
         for (i = j + 1; i < n; i++)
         {
            sums[i] = sums[i - 1] + vals[i - 1];
         }
      }

      /* Compute final partial sums (in serial) for the first entry of every interval */
      for (j = bsize; j < nvals; j += bsize)
      {
         sums[j] = sums[j - bsize] + sums[j - 1] + vals[j - 1];
      }

      /* Compute final partial sums (in parallel) for the remaining entries */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = bsize; j < nvals; j += bsize)
      {
         HYPRE_Int  i, n = hypre_min((j + bsize), nvals);

         for (i = j + 1; i < n; i++)
         {
            sums[i] += sums[j];
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set (overwrite) coefficients in the matrix.  Thin validator that
 * forwards to HYPRE_IJMatrixSetValues2 with NULL row_indexes.
 * ncols may be NULL; downstream it is treated as one column per row. */
HYPRE_Int
HYPRE_IJMatrixSetValues( HYPRE_IJMatrix       matrix,
                         HYPRE_Int            nrows,
                         HYPRE_Int           *ncols,
                         const HYPRE_BigInt  *rows,
                         const HYPRE_BigInt  *cols,
                         const HYPRE_Complex *values )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   /* Empty update: nothing to do. */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* rows, cols and values are all required. */
   if (rows == NULL)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   if (cols == NULL)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }
   if (values == NULL)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, NULL, cols, values);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set (overwrite) coefficients, with an optional per-row index array
 * (row_indexes) giving the offset of each row's data in cols/values.
 * ncols may be NULL (one column per row); row_indexes may be NULL
 * (computed as a prefix sum of ncols). */
HYPRE_Int
HYPRE_IJMatrixSetValues2( HYPRE_IJMatrix       matrix,
                          HYPRE_Int            nrows,
                          HYPRE_Int           *ncols,
                          const HYPRE_BigInt  *rows,
                          const HYPRE_Int     *row_indexes,
                          const HYPRE_BigInt  *cols,
                          const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* Empty update: nothing to do. */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* ncols is optional (NULL means one column per row), hence no check:
   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   */

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(7);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* On device memory, the device kernel handles NULL ncols/row_indexes
      itself; no host-side temporaries are needed. */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values,
                                             "set");
   }
   else
#endif
   {
      /* Host path: materialize defaults for the optional arguments. */
      HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes;
      HYPRE_Int *ncols_tmp = ncols;

      if (!ncols_tmp)
      {
         /* Default: one column per row. */
         HYPRE_Int i;
         ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
         for (i = 0; i < nrows; i++)
         {
            ncols_tmp[i] = 1;
         }
      }

      if (!row_indexes)
      {
         /* Default: rows stored contiguously; offsets are the exclusive
            prefix sum of the per-row counts. */
         row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
         hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp);
      }

      if (hypre_IJMatrixOMPFlag(ijmatrix))
      {
         hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
      }
      else
      {
         hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
      }

      /* Free only the temporaries this function allocated. */
      if (!ncols)
      {
         hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST);
      }

      if (!row_indexes)
      {
         hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set every stored coefficient of the matrix to a single value
 * (ParCSR storage only). */
HYPRE_Int
HYPRE_IJMatrixSetConstantValues( HYPRE_IJMatrix matrix, HYPRE_Complex value)
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return ( hypre_IJMatrixSetConstantValuesParCSR( ij, value) );
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Add values to existing coefficients (accumulate rather than overwrite).
 * Thin validator that forwards to HYPRE_IJMatrixAddToValues2 with NULL
 * row_indexes.  ncols may be NULL (one column per row downstream). */
HYPRE_Int
HYPRE_IJMatrixAddToValues( HYPRE_IJMatrix       matrix,
                           HYPRE_Int            nrows,
                           HYPRE_Int           *ncols,
                           const HYPRE_BigInt  *rows,
                           const HYPRE_BigInt  *cols,
                           const HYPRE_Complex *values )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   /* Empty update: nothing to do. */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* rows, cols and values are all required. */
   if (rows == NULL)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   if (cols == NULL)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }
   if (values == NULL)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, NULL, cols, values);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Add values to existing coefficients, with an optional per-row index
 * array (row_indexes) giving the offset of each row's data in cols/values.
 * ncols may be NULL (one column per row); row_indexes may be NULL
 * (computed as a prefix sum of ncols). */
HYPRE_Int
HYPRE_IJMatrixAddToValues2( HYPRE_IJMatrix       matrix,
                            HYPRE_Int            nrows,
                            HYPRE_Int           *ncols,
                            const HYPRE_BigInt  *rows,
                            const HYPRE_Int     *row_indexes,
                            const HYPRE_BigInt  *cols,
                            const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* Empty update: nothing to do. */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* ncols is optional (NULL means one column per row), hence no check:
   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   */

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(7);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* On device memory, the device kernel handles NULL ncols/row_indexes
      itself; no host-side temporaries are needed. */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values,
                                             "add");
   }
   else
#endif
   {
      /* Host path: materialize defaults for the optional arguments. */
      HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes;
      HYPRE_Int *ncols_tmp = ncols;

      if (!ncols_tmp)
      {
         /* Default: one column per row. */
         HYPRE_Int i;
         ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
         for (i = 0; i < nrows; i++)
         {
            ncols_tmp[i] = 1;
         }
      }

      if (!row_indexes)
      {
         /* Default: rows stored contiguously; offsets are the exclusive
            prefix sum of the per-row counts. */
         row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
         hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp);
      }

      if (hypre_IJMatrixOMPFlag(ijmatrix))
      {
         hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
      }
      else
      {
         hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
      }

      /* Free only the temporaries this function allocated. */
      if (!ncols)
      {
         hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST);
      }

      if (!row_indexes)
      {
         hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Finalize the matrix after all Set/AddTo calls; dispatches to the
 * device or host ParCSR assembly depending on the memory location. */
HYPRE_Int
HYPRE_IJMatrixAssemble( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) ) == HYPRE_EXEC_DEVICE)
   {
      return ( hypre_IJMatrixAssembleParCSRDevice( ij ) );
   }
#endif

   return ( hypre_IJMatrixAssembleParCSR( ij ) );
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* For each row listed in rows, write the number of stored nonzeros
 * into the corresponding slot of ncols. */
HYPRE_Int
HYPRE_IJMatrixGetRowCounts( HYPRE_IJMatrix matrix,
                            HYPRE_Int      nrows,
                            HYPRE_BigInt  *rows,
                            HYPRE_Int     *ncols )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   /* Empty query: nothing to do. */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   if (rows == NULL)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (ncols == NULL)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixGetRowCountsParCSR( ij, nrows, rows, ncols );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Retrieve coefficients at the given (row, col) positions into values.
 * Unlike the Set/AddTo paths, ncols is required here. */
HYPRE_Int
HYPRE_IJMatrixGetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int      nrows,
                         HYPRE_Int     *ncols,
                         HYPRE_BigInt  *rows,
                         HYPRE_BigInt  *cols,
                         HYPRE_Complex *values )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   /* Empty query: nothing to do. */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (ncols == NULL)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (rows == NULL)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (cols == NULL)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (values == NULL)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixGetValuesParCSR( ij, nrows, ncols, rows, cols, values );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Record the storage type (e.g. HYPRE_PARCSR) to be used for the
 * underlying matrix object. */
HYPRE_Int
HYPRE_IJMatrixSetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int      type )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixObjectType(ij) = type;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Report the storage type of the underlying matrix object via *type. */
HYPRE_Int
HYPRE_IJMatrixGetObjectType( HYPRE_IJMatrix  matrix,
                             HYPRE_Int      *type )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *type = hypre_IJMatrixObjectType(ij);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Report this process's row and column ranges.  Partitionings are stored
 * half-open [lower, upper + 1), so the inclusive upper bounds are
 * recovered by subtracting one. */
HYPRE_Int
HYPRE_IJMatrixGetLocalRange( HYPRE_IJMatrix  matrix,
                             HYPRE_BigInt   *ilower,
                             HYPRE_BigInt   *iupper,
                             HYPRE_BigInt   *jlower,
                             HYPRE_BigInt   *jupper )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *ilower = hypre_IJMatrixRowPartitioning(ij)[0];
   *iupper = hypre_IJMatrixRowPartitioning(ij)[1] - 1;
   *jlower = hypre_IJMatrixColPartitioning(ij)[0];
   *jupper = hypre_IJMatrixColPartitioning(ij)[1] - 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
Returns a pointer to an underlying ijmatrix type used to implement IJMatrix.
Assumes that the implementation has an underlying matrix, so it would not
work with a direct implementation of IJMatrix.
@return integer error code
@param IJMatrix [IN]
The ijmatrix to be pointed to.
*/
/* Hand back the underlying matrix object (e.g. a ParCSR matrix) through
 * *object.  Assumes an underlying object exists; ownership stays with
 * the IJ matrix. */
HYPRE_Int
HYPRE_IJMatrixGetObject( HYPRE_IJMatrix   matrix,
                         void           **object )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *object = hypre_IJMatrixObject( ij );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Provide per-row nonzero-count estimates so storage can be
 * preallocated (ParCSR only). */
HYPRE_Int
HYPRE_IJMatrixSetRowSizes( HYPRE_IJMatrix   matrix,
                           const HYPRE_Int *sizes )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return ( hypre_IJMatrixSetRowSizesParCSR( ij, sizes ) );
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Provide separate per-row nonzero estimates for the diagonal (local)
 * and off-diagonal (remote) parts of the ParCSR matrix. */
HYPRE_Int
HYPRE_IJMatrixSetDiagOffdSizes( HYPRE_IJMatrix   matrix,
                                const HYPRE_Int *diag_sizes,
                                const HYPRE_Int *offdiag_sizes )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixSetDiagOffdSizesParCSR( ij, diag_sizes, offdiag_sizes );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Hint how many entries will be set on rows owned by other processes,
 * so communication buffers can be sized up front (ParCSR only). */
HYPRE_Int
HYPRE_IJMatrixSetMaxOffProcElmts( HYPRE_IJMatrix matrix,
                                  HYPRE_Int max_off_proc_elmts)
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (ij == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return ( hypre_IJMatrixSetMaxOffProcElmtsParCSR(ij, max_off_proc_elmts) );
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixRead
* create IJMatrix on host memory
*--------------------------------------------------------------------------*/
/* Read an IJ matrix from per-rank files "<filename>.%05d" into host
 * memory.  Entries on rows owned by other ranks are routed through
 * AddToValues; locally owned rows use SetValues.
 *
 * Fixes: the malformed-input error path returned without closing the
 * input file, leaking the FILE handle. */
HYPRE_Int
HYPRE_IJMatrixRead( const char     *filename,
                    MPI_Comm        comm,
                    HYPRE_Int       type,
                    HYPRE_IJMatrix *matrix_ptr )
{
   HYPRE_IJMatrix  matrix;
   HYPRE_BigInt    ilower, iupper, jlower, jupper;
   HYPRE_BigInt    I, J;
   HYPRE_Int       ncols;
   HYPRE_Complex   value;
   HYPRE_Int       myid, ret;
   char            new_filename[255];
   FILE           *file;

   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename, "%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* First line: this rank's local row and column ranges. */
   hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper);

   HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix);

   HYPRE_IJMatrixSetObjectType(matrix, type);

   HYPRE_IJMatrixInitialize_v2(matrix, HYPRE_MEMORY_HOST);

   /* It is important to ensure that whitespace follows the index value to help
    * catch mistakes in the input file.  See comments in IJVectorRead(). */
   ncols = 1;
   while ( (ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF )
   {
      if (ret != 3)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file.");
         fclose(file);   /* don't leak the handle on malformed input */
         return hypre_error_flag;
      }

      /* Entries outside this rank's row range are accumulated remotely. */
      if (I < ilower || I > iupper)
      {
         HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value);
      }
      else
      {
         HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value);
      }
   }

   HYPRE_IJMatrixAssemble(matrix);

   fclose(file);

   *matrix_ptr = matrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixPrint
*--------------------------------------------------------------------------*/
/* Print the assembled ParCSR matrix to per-rank files.  Matrices living
 * in device memory are cloned to the host first, printed, and the clone
 * destroyed. */
HYPRE_Int
HYPRE_IJMatrixPrint( HYPRE_IJMatrix  matrix,
                     const char     *filename )
{
   void                 *object;
   HYPRE_ParCSRMatrix    par_csr;
   HYPRE_MemoryLocation  memory_location;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_IJMatrixGetObject(matrix, &object);
   par_csr = (HYPRE_ParCSRMatrix) object;
   memory_location = hypre_IJMatrixMemoryLocation(matrix);

   if ( hypre_GetActualMemLocation(memory_location) == hypre_MEMORY_HOST )
   {
      hypre_ParCSRMatrixPrintIJ(par_csr, 0, 0, filename);
   }
   else
   {
      /* Data lives on the device: print a temporary host copy. */
      HYPRE_ParCSRMatrix host_copy = hypre_ParCSRMatrixClone_v2(par_csr, 1, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixPrintIJ(host_copy, 0, 0, filename);
      hypre_ParCSRMatrixDestroy(host_copy);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixSetOMPFlag
*--------------------------------------------------------------------------*/
/* Stores the OpenMP flag on the IJ matrix wrapper.  Fails with an argument
 * error when the handle is NULL. */
HYPRE_Int
HYPRE_IJMatrixSetOMPFlag( HYPRE_IJMatrix matrix,
                          HYPRE_Int      omp_flag )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixOMPFlag(ijmatrix) = omp_flag;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixTranspose
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixTranspose( HYPRE_IJMatrix  matrix_A,
                         HYPRE_IJMatrix *matrix_AT )
{
   hypre_IJMatrix  *ij_A = (hypre_IJMatrix *) matrix_A;
   hypre_IJMatrix  *ij_AT;
   HYPRE_Int        i;

   if (!ij_A)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Allocate the wrapper for A^T and mirror A's metadata with the
    * row/column roles swapped; the underlying object is built below. */
   ij_AT = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_IJMatrixComm(ij_AT) = hypre_IJMatrixComm(ij_A);
   hypre_IJMatrixObject(ij_AT) = NULL;
   hypre_IJMatrixTranslator(ij_AT) = NULL;
   hypre_IJMatrixAssumedPart(ij_AT) = NULL;
   hypre_IJMatrixObjectType(ij_AT) = hypre_IJMatrixObjectType(ij_A);
   /* The transpose is produced fully assembled. */
   hypre_IJMatrixAssembleFlag(ij_AT) = 1;
   hypre_IJMatrixPrintLevel(ij_AT) = hypre_IJMatrixPrintLevel(ij_A);

   /* Global extents: rows of A^T are the columns of A and vice versa. */
   hypre_IJMatrixGlobalFirstRow(ij_AT) = hypre_IJMatrixGlobalFirstCol(ij_A);
   hypre_IJMatrixGlobalFirstCol(ij_AT) = hypre_IJMatrixGlobalFirstRow(ij_A);
   hypre_IJMatrixGlobalNumRows(ij_AT) = hypre_IJMatrixGlobalNumCols(ij_A);
   hypre_IJMatrixGlobalNumCols(ij_AT) = hypre_IJMatrixGlobalNumRows(ij_A);

   /* Local partitionings (lower/upper pairs) are swapped the same way. */
   for (i = 0; i < 2; i++)
   {
      hypre_IJMatrixRowPartitioning(ij_AT)[i] = hypre_IJMatrixColPartitioning(ij_A)[i];
      hypre_IJMatrixColPartitioning(ij_AT)[i] = hypre_IJMatrixRowPartitioning(ij_A)[i];
   }

   /* Only the ParCSR object type is supported. */
   if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR)
   {
      hypre_IJMatrixTransposeParCSR(ij_A, ij_AT);
   }
   else
   {
      hypre_error_in_arg(1);
   }

   /* NOTE(review): on the unsupported-type path the wrapper is still
    * returned (with a NULL object), matching the error-flag convention
    * used elsewhere in this file. */
   *matrix_AT = (HYPRE_IJMatrix) ij_AT;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixNorm
*
* TODO: Add other norms
*--------------------------------------------------------------------------*/
/* Computes a norm of the IJ matrix (ParCSR only); see the ParCSR backend
 * for the norm that is produced.  Errors on a NULL handle or an
 * unsupported object type. */
HYPRE_Int
HYPRE_IJMatrixNorm( HYPRE_IJMatrix  matrix,
                    HYPRE_Real     *norm )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
   }
   else if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR)
   {
      hypre_IJMatrixNormParCSR(ijmatrix, norm);
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixAdd
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_IJMatrixAdd( HYPRE_Complex    alpha,
                   HYPRE_IJMatrix   matrix_A,
                   HYPRE_Complex    beta,
                   HYPRE_IJMatrix   matrix_B,
                   HYPRE_IJMatrix  *matrix_C )
{
   hypre_IJMatrix  *ij_A = (hypre_IJMatrix *) matrix_A;
   hypre_IJMatrix  *ij_B = (hypre_IJMatrix *) matrix_B;
   hypre_IJMatrix  *ij_C;

   HYPRE_BigInt    *row_partitioning_A;
   HYPRE_BigInt    *col_partitioning_A;
   HYPRE_BigInt    *row_partitioning_B;
   HYPRE_BigInt    *col_partitioning_B;
   HYPRE_Int        i;

   if (!ij_A)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* BUG FIX: ij_B is dereferenced below (its partitionings are read),
    * so it must be checked for NULL just like ij_A. */
   if (!ij_B)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   /* Check if A and B have the same row/col partitionings */
   row_partitioning_A = hypre_IJMatrixRowPartitioning(ij_A);
   row_partitioning_B = hypre_IJMatrixRowPartitioning(ij_B);
   col_partitioning_A = hypre_IJMatrixColPartitioning(ij_A);
   col_partitioning_B = hypre_IJMatrixColPartitioning(ij_B);
   for (i = 0; i < 2; i++)
   {
      if (row_partitioning_A[i] != row_partitioning_B[i])
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "Input matrices must have same row partitioning!");
         return hypre_error_flag;
      }
      if (col_partitioning_A[i] != col_partitioning_B[i])
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "Input matrices must have same col partitioning!");
         return hypre_error_flag;
      }
   }

   /* Build the wrapper for C = alpha*A + beta*B, mirroring A's metadata. */
   ij_C = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_IJMatrixComm(ij_C)         = hypre_IJMatrixComm(ij_A);
   hypre_IJMatrixObject(ij_C)       = NULL;
   hypre_IJMatrixTranslator(ij_C)   = NULL;
   hypre_IJMatrixAssumedPart(ij_C)  = NULL;
   hypre_IJMatrixObjectType(ij_C)   = hypre_IJMatrixObjectType(ij_A);
   hypre_IJMatrixAssembleFlag(ij_C) = 1;
   hypre_IJMatrixPrintLevel(ij_C)   = hypre_IJMatrixPrintLevel(ij_A);

   /* Copy row/col partitioning of A to C */
   for (i = 0; i < 2; i++)
   {
      hypre_IJMatrixRowPartitioning(ij_C)[i] = row_partitioning_A[i];
      hypre_IJMatrixColPartitioning(ij_C)[i] = col_partitioning_A[i];
   }

   /* Only the ParCSR object type is supported. */
   if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR)
   {
      hypre_IJMatrixAddParCSR(alpha, ij_A, beta, ij_B, ij_C);
   }
   else
   {
      hypre_error_in_arg(1);
   }

   *matrix_C = (HYPRE_IJMatrix) ij_C;

   return hypre_error_flag;
}
|
distributiongenerator.h | // @file distributiongenerator.h This code provides basic structure for
// distribution generators. This should be inherited by all other distribution
// generators.
// @author TPOC: contact@palisade-crypto.org
//
// @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution. THIS SOFTWARE IS
// PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
#define LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
#include <array>
#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <mutex>
#include <random>
#include <thread>

#include "math/backend.h"
#include "utils/prng/blake2engine.h"
// #define FIXED_SEED // if defined, then uses a fixed seed number for
// reproducible results during debug. Use only one OMP thread to ensure
// reproducibility
namespace lbcrypto {
// Defines the PRNG implementation used by PALISADE.
// The cryptographically secure PRNG used by PALISADE is based on BLAKE2 hash
// functions. A user can replace it with a different PRNG if desired by defining
// the same methods as for the Blake2Engine class.
typedef Blake2Engine PRNG;
/**
* @brief The class providing the PRNG capability to all random distribution
 * generators in PALISADE. The security of Ring Learning With Errors (used for
* all crypto capabilities in PALISADE) depends on the randomness of uniform,
* ternary, and Gaussian distributions, which derive their randomness from the
* PRNG.
*/
class PseudoRandomNumberGenerator {
 public:
  /**
   * @brief Eagerly initializes the thread-local PRNG engine of every OMP
   * thread by touching GetPRNG() once on each of them.
   */
  static void InitPRNG() {
    int threads = PalisadeParallelControls.GetNumThreads();
    if (threads == 0) {
      threads = 1;
    }
#pragma omp parallel for num_threads(threads)
    for (int i = 0; i < threads; ++i) {
      GetPRNG();
    }
  }

  /**
   * @brief Returns a reference to the PRNG engine, seeding it on first use.
   * m_prng is declared OMP threadprivate below, so each thread seeds and
   * owns its own engine instance.
   */
  static PRNG &GetPRNG() {
    // initialization of PRNGs
    if (m_prng == nullptr) {
#pragma omp critical
      {
#if defined(FIXED_SEED)
        // Only used for debugging in the single-threaded mode.
        std::cerr << "**FOR DEBUGGING ONLY!!!! Using fixed initializer for "
                     "PRNG. Use a single thread only, e.g., OMP_NUM_THREADS=1!"
                  << std::endl;

        std::array<uint32_t, 16> seed{};
        seed[0] = 1;
        m_prng = std::make_shared<PRNG>(seed);
#else
        // A 512-bit seed is generated for each thread (roughly 256 bits of
        // security). The seed is the word-wise sum (mod 2^32) of a sample
        // from std::random_device and a BLAKE2 sample seeded from the
        // current time stamp, a hash of the current thread id, and a heap
        // memory location. The BLAKE2 sample covers the case where
        // random_device is deterministic (e.g. MinGW with GCC below 9.2).
        std::array<uint32_t, 16> initKey{};
        // high-resolution clock typically has a nanosecond tick period;
        // this may give up to 32 bits of entropy as the low word recycles
        // every ~4.3 seconds
        initKey[0] = std::chrono::high_resolution_clock::now()
                         .time_since_epoch()
                         .count();
        // A thread id is often close to being random (on most systems)
        initKey[1] = std::hash<std::thread::id>{}(std::this_thread::get_id());
        // On a 64-bit machine, the thread id is 64 bits long;
        // skip on 32-bit arm architectures
#if !defined(__arm__) && !defined(__EMSCRIPTEN__)
        if (sizeof(size_t) == 8)
          initKey[2] =
              (std::hash<std::thread::id>{}(std::this_thread::get_id()) >> 32);
#endif
        // heap variable; the least 32 bits of its address are used as the
        // BLAKE2 counter to increase the entropy of the BLAKE2 sample
        void *mem = malloc(1);
        free(mem);
        // BUG FIX: convert the pointer through uintptr_t instead of
        // reinterpret_cast<long long>; the original cast is not guaranteed
        // to be valid for pointer-to-integer conversion and relied on an
        // implicit narrowing to uint32_t.
        uint32_t counter =
            static_cast<uint32_t>(reinterpret_cast<std::uintptr_t>(mem));

        PRNG gen(initKey, counter);

        std::uniform_int_distribution<uint32_t> distribution(0);
        std::array<uint32_t, 16> seed{};
        for (uint32_t i = 0; i < 16; i++) {
          seed[i] = distribution(gen);
        }

        std::array<uint32_t, 16> rdseed{};
        size_t attempts = 3;
        bool rdGenPassed = false;
        size_t idx = 0;
        while (!rdGenPassed && idx < attempts) {
          try {
            std::random_device genR;
            for (uint32_t i = 0; i < 16; i++) {
              // we use the fact that there is no overflow for unsigned
              // integers (from the C++ standard), i.e., arithmetic mod 2^32
              // is performed. For the seed to be random, it is sufficient
              // for one of the two samples to be random; in almost all
              // practical cases distribution(genR) is.
              rdseed[i] = distribution(genR);
            }
            rdGenPassed = true;
          } catch (const std::exception &) {
            // std::random_device may throw when no entropy source is
            // available; retry up to `attempts` times, otherwise fall back
            // to the BLAKE2-derived seed alone.
          }
          idx++;
        }

        for (uint32_t i = 0; i < 16; i++) {
          seed[i] += rdseed[i];
        }

        m_prng = std::make_shared<PRNG>(seed);
#endif
      }
    }
    return *m_prng;
  }

 private:
  // shared pointer to a thread-specific PRNG engine
  static std::shared_ptr<PRNG> m_prng;

#if !defined(FIXED_SEED)
  // avoid contention on m_prng:
  // local copies of m_prng are created for each thread
#pragma omp threadprivate(m_prng)
#endif
};
/**
 * @brief Abstract base class for all random distribution generators.
 *
 * Defines the interface a concrete generator must implement; concrete
 * generators obtain their randomness from the single shared PRNG.
 */
template <typename VecType>
class DistributionGenerator {
 public:
  DistributionGenerator() = default;
  virtual ~DistributionGenerator() = default;
};
} // namespace lbcrypto
#endif // LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
|
omp-parallel-for-task.c | #include <omp.h>
#include <stdio.h>
#include <unistd.h>
#define THREADS 2
#define LEN 7
int main(void)
{
  int thread_ids[LEN] = {0};
  int idx;

  omp_set_num_threads(THREADS);

  /* Each iteration of the parallel loop spawns a task that records the id
   * of the thread that ends up executing it; the implicit barrier at the
   * end of the parallel region guarantees all tasks finish before the
   * results are printed. */
  #pragma omp parallel for
  for (idx = 0; idx < LEN; idx++)
  {
    #pragma omp task
    {
      thread_ids[idx] = omp_get_thread_num();
      usleep(50);
    }
  }

  for (idx = 0; idx < LEN; idx++)
    printf("%d\n", thread_ids[idx]);

  return 0;
}
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes RESULT = X - Y for `struct timeval` values, normalizing so that
 * tv_usec is non-negative.  NOTE: Y is modified in the process (the classic
 * GNU idiom).  Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec  += borrow;
  }
  /* Carry excess microseconds (> 1 second) into y's seconds field. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }

  /* After normalization tv_usec is certainly positive. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* BUG FIX: default the problem dimensions so they are never read
   * uninitialized when the command line omits them.  The +8 accounts for
   * the 4-deep halo on each side of the 25-point stencil. */
  Nx = Ny = Nz = 32 + 8;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds the two time planes, coef the 13
  // axis-symmetric coefficient fields
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 32;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // BUG FIX: loops start at 0 (not 1) so the boundary planes read by the
  // stencil (indices 0..3) are initialized as well
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
              coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
              coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
              coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
              coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
              coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
              coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
              coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
              coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUG FIX: the macro defined above is MIN, not min (lowercase `min`
     * does not exist and failed to compile). */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);          /* BUG FIX: the top-level arrays were leaked */

  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
|
fftw.h | #ifndef __FFTW_H__
#define __FFTW_H__
#include <omp.h>
#include <cassert>
#include <iostream>
#include "types.h"
#include "index.h"
#include <fftw3.h>
namespace Impl {
struct FFT {
  // Transform extents: each 2D transform is nx1_ x nx2_; nb_batches_
  // independent planes are processed per call (1 selects the serial path).
  int nx1_, nx2_, nb_batches_;
  // nx1h_ = nx1_/2 + 1: spectrum length of the real-to-complex transform
  // along x1.  nx2h_ is computed in init() but not used below.
  int nx1h_, nx2h_;
  // 1D FFTW plans, reused for every line through the new-array execute API.
  fftw_plan forward_c2c_plan_, forward_r2c_plan_;
  fftw_plan backward_c2c_plan_, backward_c2r_plan_;
  // Thread private buffer
  complex64 *dptr_buffer_c_;  // shared transposed intermediate spectrum
  complex64 *thread_private_buffers_nx1h_, *thread_private_buffers_nx2_;
  complex64 *thread_private_buffers_nx2_out_;
  float64 *thread_private_buffers_nx1_r2c_;
  float64 *thread_private_buffers_nx1_c2r_;
  // Owning views backing the raw pointers above (raw() pointers cached).
  complex_view_3d d_buffer_c_;
  complex_view_2d d_thread_private_buffers_nx1h_, d_thread_private_buffers_nx2_;
  complex_view_2d d_thread_private_buffers_nx2_out_;
  view_2d d_buffers_nx1_r2c_, d_buffers_nx1_c2r_;

  // Non-batched constructor: a single nx1 x nx2 transform.
  FFT(int nx1, int nx2)
    : nx1_(nx1), nx2_(nx2), nb_batches_(1) {
    init();
  }

  // Batched constructor: `batch` independent nx1 x nx2 transforms.
  FFT(int nx1, int nx2, int batch)
    : nx1_(nx1), nx2_(nx2), nb_batches_(batch) {
    init();
  }

  // Destroys the FFTW plans and releases all work buffers.
  virtual ~FFT() {
    fftw_destroy_plan(forward_c2c_plan_);
    fftw_destroy_plan(backward_c2c_plan_);
    fftw_destroy_plan(forward_r2c_plan_);
    fftw_destroy_plan(backward_c2r_plan_);
    deallocate(d_buffer_c_);
    deallocate(d_thread_private_buffers_nx1h_);
    deallocate(d_thread_private_buffers_nx2_);
    deallocate(d_thread_private_buffers_nx2_out_);
    deallocate(d_buffers_nx1_r2c_);
    deallocate(d_buffers_nx1_c2r_);
  }

  // 1D forward complex-to-complex transform of length nx2_ (new-array
  // execution of the cached plan).
  void fft(complex64 *dptr_in, complex64 *dptr_out) {
    fftw_complex *in = reinterpret_cast<fftw_complex*>(dptr_in);
    fftw_complex *out = reinterpret_cast<fftw_complex*>(dptr_out);
    fftw_execute_dft(forward_c2c_plan_, in, out);
  }

  // 1D forward real-to-complex transform of length nx1_ (output nx1h_).
  void fftr2c(float64 *dptr_in, complex64 *dptr_out) {
    fftw_complex *out = reinterpret_cast<fftw_complex*>(dptr_out);
    fftw_execute_dft_r2c(forward_r2c_plan_, dptr_in, out);
  }

  // 1D backward complex-to-complex transform of length nx2_.
  void ifft(complex64 *dptr_in, complex64 *dptr_out) {
    fftw_complex *in = reinterpret_cast<fftw_complex*>(dptr_in);
    fftw_complex *out = reinterpret_cast<fftw_complex*>(dptr_out);
    fftw_execute_dft(backward_c2c_plan_, in, out);
  }

  // 1D backward complex-to-real transform (input nx1h_, output nx1_).
  // NOTE: fftw c2r destroys its input array.
  void ifftc2r(complex64 *dptr_in, float64 *dptr_out) {
    fftw_complex *in = reinterpret_cast<fftw_complex*>(dptr_in);
    fftw_execute_dft_c2r(backward_c2r_plan_, in, dptr_out);
  }

  /* 2D forward (r2c) transform; dispatches on the batch count.
   * In the host code, we assume LayoutRight (C style)
   */
  void fft2(float64 *dptr_in, complex64 *dptr_out) {
    if(nb_batches_ == 1) {
      fft2_serial(dptr_in, dptr_out);
    }
    else {
      fft2_batch(dptr_in, dptr_out);
    }
  }

  /* @brief 2D backward (c2r) transform wrapper; dispatches on batch count.
   * In the host code, we assume LayoutRight (C style)
   * @param[in]  dptr_in(nx1h,nx2,batch)
   * @param[out] dptr_out(nx1,nx2,batch)
   */
  void ifft2(complex64 *dptr_in, float64 *dptr_out) {
    if(nb_batches_ == 1) {
      ifft2_serial(dptr_in, dptr_out);
    }
    else {
      ifft2_batch(dptr_in, dptr_out);
    }
  }

private:
  /* @brief 2D FFT wrapper for the serial (non-batched) case
   * In the host code, we assume LayoutRight (C style)
   * @param[in]  dptr_in[nx2,nx1]
   * @param[out] dptr_out[nx2,nx1h]
   */
  void fft2_serial(float64 *dptr_in, complex64 *dptr_out) {
    #pragma omp parallel
    {
      // Each thread works on private line buffers indexed by its id.
      int tid = omp_get_thread_num();
      float64 *thread_private_buffer_nx1 = &thread_private_buffers_nx1_r2c_[nx1_*tid];
      complex64 *thread_private_buffer_nx1h = &thread_private_buffers_nx1h_[nx1h_*tid];
      complex64 *thread_private_buffer_nx2 = &thread_private_buffers_nx2_[nx2_*tid];

      // Fourier Transform in x direction (r2c per row), then transpose the
      // spectrum into the shared buffer so the y-transform reads contiguously.
      #pragma omp for schedule(static)
      for(int ix2=0; ix2 < nx2_; ix2++) {
        for(int ix1=0; ix1 < nx1_; ix1++) {
          int idx = Index::coord_2D2int(ix1, ix2, nx1_, nx2_);
          thread_private_buffer_nx1[ix1] = dptr_in[idx];
        }
        fftr2c(thread_private_buffer_nx1, thread_private_buffer_nx1h);
        // Transpose [nx2,nx1h] -> [nx1h,nx2]
        for(int ix1=0; ix1 < nx1h_; ix1++) {
          int idx = Index::coord_2D2int(ix2, ix1, nx2_, nx1h_);
          dptr_buffer_c_[idx] = thread_private_buffer_nx1h[ix1];
        }
      }
      // Fourier Transform in y direction (c2c per transposed column).
      // The implicit barrier of the omp-for above guarantees the transpose
      // buffer is complete before it is read here.
      #pragma omp for schedule(static)
      for(int ix1=0; ix1 < nx1h_; ix1++) {
        int offset = nx2_ * ix1;
        fft(&dptr_buffer_c_[offset], thread_private_buffer_nx2);
        for(int ix2=0; ix2 < nx2_; ix2++) {
          int idx = Index::coord_2D2int(ix1, ix2, nx1h_, nx2_);
          dptr_out[idx] = thread_private_buffer_nx2[ix2];
        }
      }
      #pragma omp barrier
    }
  }

  /* @brief 2D FFT wrapper for the batched case
   * In the host code, we assume LayoutRight (C style)
   * @param[in]  dptr_in[batch,nx2,nx1]
   * @param[out] dptr_out[batch,nx2,nx1h]
   */
  void fft2_batch(float64 *dptr_in, complex64 *dptr_out) {
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      float64 *thread_private_buffer_nx1 = &thread_private_buffers_nx1_r2c_[nx1_*tid];
      complex64 *thread_private_buffer_nx1h = &thread_private_buffers_nx1h_[nx1h_*tid];
      complex64 *thread_private_buffer_nx2 = &thread_private_buffers_nx2_[nx2_*tid];

      // Fourier Transform in x direction, batched over planes.
      #pragma omp for schedule(static), collapse(2)
      for(int ib=0; ib<nb_batches_; ib++) {
        for(int ix2=0; ix2 < nx2_; ix2++) {
          for(int ix1=0; ix1 < nx1_; ix1++) {
            int idx = Index::coord_3D2int(ix1, ix2, ib, nx1_, nx2_, nb_batches_);
            thread_private_buffer_nx1[ix1] = dptr_in[idx];
          }
          fftr2c(thread_private_buffer_nx1, thread_private_buffer_nx1h);
          // Transpose [batch,nx2,nx1h] -> [batch,nx1h,nx2]
          for(int ix1=0; ix1 < nx1h_; ix1++) {
            int idx = Index::coord_3D2int(ix2, ix1, ib, nx2_, nx1h_, nb_batches_);
            dptr_buffer_c_[idx] = thread_private_buffer_nx1h[ix1];
          }
        }
      }
      // Fourier Transform in y direction
      #pragma omp for schedule(static), collapse(2)
      for(int ib=0; ib<nb_batches_; ib++) {
        for(int ix1=0; ix1 < nx1h_; ix1++) {
          int offset = nx2_ * Index::coord_2D2int(ix1, ib, nx1h_, nb_batches_);
          fft(&dptr_buffer_c_[offset], thread_private_buffer_nx2);
          for(int ix2=0; ix2 < nx2_; ix2++) {
            int idx = Index::coord_3D2int(ix1, ix2, ib, nx1h_, nx2_, nb_batches_);
            dptr_out[idx] = thread_private_buffer_nx2[ix2];
          }
        }
      }
      #pragma omp barrier
    }
  }

  /* @brief 2D inverse FFT wrapper for the serial case
   * In the host code, we assume LayoutRight (C style)
   * @param[in]  dptr_in[nx2,nx1h]
   * @param[out] dptr_out[nx2,nx1]
   */
  void ifft2_serial(complex64 *dptr_in, float64 *dptr_out) {
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      // c2r line buffer is padded to nx1_+2 (r2c-style padded row length)
      float64 *thread_private_buffer_nx1 = &thread_private_buffers_nx1_c2r_[(nx1_+2)*tid];
      complex64 *thread_private_buffer_nx2 = &thread_private_buffers_nx2_[nx2_*tid];
      complex64 *thread_private_buffer_nx2_out = &thread_private_buffers_nx2_out_[nx2_*tid];

      // Inverse Fourier Transform in y direction; results are written to
      // the shared buffer for the subsequent x-direction pass.
      #pragma omp for schedule(static)
      for(int ix1=0; ix1 < nx1h_; ix1++) {
        for(int ix2=0; ix2 < nx2_; ix2++) {
          int idx = Index::coord_2D2int(ix1, ix2, nx1h_,nx2_);
          thread_private_buffer_nx2[ix2] = dptr_in[idx];
        }
        ifft(thread_private_buffer_nx2, thread_private_buffer_nx2_out);
        for(int ix2=0; ix2 < nx2_; ix2++) {
          int idx = Index::coord_2D2int(ix1, ix2, nx1h_, nx2_);
          dptr_buffer_c_[idx] = thread_private_buffer_nx2_out[ix2];
        }
      }
      // Inverse Fourier Transform in x direction (c2r per row).
      #pragma omp for schedule(static)
      for(int ix2=0; ix2 < nx2_; ix2++) {
        int offset_in = nx1h_ * ix2;
        ifftc2r(&dptr_buffer_c_[offset_in], thread_private_buffer_nx1);
        for(int ix1=0; ix1 < nx1_; ix1++) {
          int idx = Index::coord_2D2int(ix1, ix2, nx1_, nx2_);
          dptr_out[idx] = thread_private_buffer_nx1[ix1];
        }
      }
      #pragma omp barrier
    }
  }

  /* @brief 2D inverse FFT wrapper for the batched case
   * In the host code, we assume LayoutRight (C style)
   * @param[in]  dptr_in[batch,nx2,nx1h]
   * @param[out] dptr_out[batch,nx2,nx1]
   */
  void ifft2_batch(complex64 *dptr_in, float64 *dptr_out) {
    #pragma omp parallel
    {
      int tid = omp_get_thread_num();
      float64 *thread_private_buffer_nx1 = &thread_private_buffers_nx1_c2r_[(nx1_+2)*tid];
      complex64 *thread_private_buffer_nx2 = &thread_private_buffers_nx2_[nx2_*tid];
      complex64 *thread_private_buffer_nx2_out = &thread_private_buffers_nx2_out_[nx2_*tid];

      // Inverse Fourier Transform in y direction
      #pragma omp for schedule(static), collapse(2)
      for(int ib=0; ib < nb_batches_; ib++) {
        for(int ix1=0; ix1 < nx1h_; ix1++) {
          for(int ix2=0; ix2 < nx2_; ix2++) {
            int idx = Index::coord_3D2int(ix1, ix2, ib, nx1h_, nx2_, nb_batches_);
            thread_private_buffer_nx2[ix2] = dptr_in[idx];
          }
          ifft(thread_private_buffer_nx2, thread_private_buffer_nx2_out);
          for(int ix2=0; ix2 < nx2_; ix2++) {
            int idx = Index::coord_3D2int(ix1, ix2, ib, nx1h_, nx2_, nb_batches_);
            dptr_buffer_c_[idx] = thread_private_buffer_nx2_out[ix2];
          }
        }
      }
      // Inverse Fourier Transform in x direction
      #pragma omp for schedule(static), collapse(2)
      for(int ib=0; ib < nb_batches_; ib++) {
        for(int ix2=0; ix2 < nx2_; ix2++) {
          int offset = nx1h_ * Index::coord_2D2int(ix2, ib, nx2_, nb_batches_);
          ifftc2r(&dptr_buffer_c_[offset], thread_private_buffer_nx1);
          for(int ix1=0; ix1 < nx1_; ix1++) {
            int idx = Index::coord_3D2int(ix1, ix2, ib, nx1_, nx2_, nb_batches_);
            dptr_out[idx] = thread_private_buffer_nx1[ix1];
          }
        }
      }
      #pragma omp barrier
    }
  }

  // Creates the four 1D plans (with throwaway arrays; they are later
  // executed on other arrays via the new-array interface) and allocates the
  // shared transpose buffer plus one line buffer per OMP thread.
  void init() {
    nx1h_ = nx1_/2 + 1;
    nx2h_ = nx2_/2 + 1;
    assert(nb_batches_ >= 1);

    // Initialize fftw
    fftw_complex *c_in, *c_out;
    fftw_complex *c_in_c2r, *c_out_r2c;
    float64 *in, *out;
    c_in  = fftw_alloc_complex(nx2_);
    c_out = fftw_alloc_complex(nx2_);
    in  = fftw_alloc_real(nx1_);
    out = fftw_alloc_real(nx1_+2);
    c_in_c2r  = fftw_alloc_complex(nx1h_);
    c_out_r2c = fftw_alloc_complex(nx1h_);

    forward_c2c_plan_  = fftw_plan_dft_1d(nx2_, c_in, c_out, FFTW_FORWARD, FFTW_ESTIMATE);
    backward_c2c_plan_ = fftw_plan_dft_1d(nx2_, c_out, c_in, FFTW_BACKWARD, FFTW_ESTIMATE);
    forward_r2c_plan_  = fftw_plan_dft_r2c_1d(nx1_, in, c_out_r2c, FFTW_ESTIMATE);
    backward_c2r_plan_ = fftw_plan_dft_c2r_1d(nx1_, c_in_c2r, out, FFTW_ESTIMATE);

    // Planning arrays are no longer needed once the plans exist.
    fftw_free(in); fftw_free(out);
    fftw_free(c_in); fftw_free(c_out);
    fftw_free(c_in_c2r); fftw_free(c_out_r2c);

    // Malloc thread private buffers (one line buffer per OMP thread).
    size_t nb_threads=0;
    #pragma omp parallel
    nb_threads = static_cast<size_t>( omp_get_num_threads() );
    std::cout << "nb_threads = " << nb_threads << std::endl;

    size_t nx1 = nx1_, nx2 = nx2_, nx1h = nx1h_, nb_batches = nb_batches_;
    allocate(d_buffer_c_, {nx2, nx1h, nb_batches});
    allocate(d_thread_private_buffers_nx1h_, {nx1h,nb_threads});
    allocate(d_thread_private_buffers_nx2_, {nx2,nb_threads});
    allocate(d_thread_private_buffers_nx2_out_, {nx2,nb_threads});
    allocate(d_buffers_nx1_r2c_, {nx1, nb_threads});
    allocate(d_buffers_nx1_c2r_, {nx1+2,nb_threads});

    // Cache the raw pointers used by the transform kernels above.
    dptr_buffer_c_ = d_buffer_c_.raw();
    thread_private_buffers_nx1h_ = d_thread_private_buffers_nx1h_.raw();
    thread_private_buffers_nx2_ = d_thread_private_buffers_nx2_.raw();
    thread_private_buffers_nx2_out_ = d_thread_private_buffers_nx2_out_.raw();
    thread_private_buffers_nx1_r2c_ = d_buffers_nx1_r2c_.raw();
    thread_private_buffers_nx1_c2r_ = d_buffers_nx1_c2r_.raw();
  }
};
};
#endif
|
integrate_c.c | #include "integrate_c.h" /* just to assure that declarations match */
#include <stdio.h>
#include <stdlib.h>
/* Integrand: the square function, f(x) = x^2. */
double f(double x)
{
    return x * x;
}
/* Midpoint-rule approximation of the integral of f over [a, b] with N
 * panels.  Returns 0.0 (after a diagnostic) when the panel width is zero. */
double lib_integrate_c(double a, double b, int N)
{
    double dx = (b - a) / N;
    double acc = 0.0;
    int i;

    if (dx == 0.0) {
        fprintf(stderr, "dx == 0!\n");
        return 0.0;
    }

    /* Sample f at each panel midpoint and accumulate the panel areas. */
    for (i = 0; i < N; i++)
        acc += f(a + (i + 1. / 2.) * dx) * dx;

    return acc;
}
/* OpenMP midpoint-rule approximation of the integral of f over [a, b]
 * with N panels; identical contract to lib_integrate_c(). */
double lib_integrate_c_omp(double a, double b, int N)
{
  double s = 0.0;
  double dx = (b-a)/N;
  int i;
  if(dx == 0.0) {
    fprintf(stderr, "dx == 0!\n");
    return 0.0;
  }
  /* BUG FIX: dropped the bogus lastprivate(a) clause.  `a` is only read
   * inside the loop, and since no iteration ever assigned its private
   * copy, lastprivate left `a` indeterminate after the loop; sharing it
   * (the default) is what is actually wanted. */
  #pragma omp parallel for reduction(+:s)
  for(i = 0; i < N; i++) {
    s += f(a + (i + 1./2.)*dx)*dx;
  }
  return s;
}
|
icv-2.c | /* { dg-do run { target *-*-linux* *-*-gnu* *-*-freebsd* } } */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
#include <pthread.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
pthread_barrier_t bar;
/* Thread function run by both pthreads.  Verifies that the OpenMP
   nthreads-var ICV is kept per thread: one pthread (p != NULL) sets it to
   3, the other (p == NULL) sets it to 6, and each must then observe only
   its own setting.  The barriers order the two omp_set_num_threads calls
   so both have happened before the checks. */
void *tf (void *p)
{
  int l;
  if (p)
    omp_set_num_threads (3);
  pthread_barrier_wait (&bar);
  if (!p)
    omp_set_num_threads (6);
  pthread_barrier_wait (&bar);
  omp_set_dynamic (0);
  /* Each pthread must see its own value, not the other thread's. */
  if (omp_get_max_threads () != (p ? 3 : 6))
    abort ();
  l = 0;
  /* Inside the parallel region every OpenMP thread inherits the master's
     value, and a further omp_set_num_threads must be visible only to the
     thread that made the call. */
  #pragma omp parallel num_threads (6) reduction (|:l)
  {
    l |= omp_get_max_threads () != (p ? 3 : 6);
    omp_set_num_threads ((p ? 3 : 6) + omp_get_thread_num ());
    l |= omp_get_max_threads () != ((p ? 3 : 6) + omp_get_thread_num ());
  }
  if (l)
    abort ();
  return NULL;
}
int
main (void)
{
pthread_t th;
pthread_barrier_init (&bar, NULL, 2);
pthread_create (&th, NULL, tf, NULL);
tf ("");
pthread_join (th, NULL);
return 0;
}
|
kthvalue_op.h | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/transpose_op.h"
namespace paddle {
namespace operators {
// For every row of `input`, finds the k-th smallest element (k is 1-based)
// and writes its value to t_out[i] and its column index to t_indices[i].
// The comparator orders non-NaN values before NaN, otherwise ascending, so
// NaNs sort to the end of each row.
template <typename T, typename Type>
static void getKthvalue(Type input_height, Type input_width, int input_dim,
                        const framework::Tensor* input, T* t_out,
                        Type* t_indices, const int& k) {
  // Heuristic: sorting only the first k elements (partial_sort) pays off
  // while k is small relative to the row width; otherwise nth_element.
  bool partial_sort_flag = (k * 64) < input_width;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    // Materialize row i as (value, original column) pairs.
    std::vector<std::pair<T, Type>> col_vec;
    col_vec.reserve(input_width);
    if (input_dim == 1) {
      // 1-D tensor: a single logical row, read via a flattened view.
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(j), j));
      }
    } else {
      // N-D tensor viewed as a 2-D matrix of input_height rows.
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j));
      }
    }
    if (partial_sort_flag) {
      std::partial_sort(
          col_vec.begin(), col_vec.begin() + k, col_vec.end(),
          [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
            // non-NaN < NaN; among comparable values, ascending order
            return (!std::isnan(static_cast<double>(l.first)) &&
                    std::isnan(static_cast<double>(r.first))) ||
                   (l.first < r.first);
          });
    } else {
      // Only the element at position k-1 needs to be in its sorted place.
      std::nth_element(
          col_vec.begin(), col_vec.begin() + k - 1, col_vec.end(),
          [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
            return (!std::isnan(static_cast<double>(l.first)) &&
                    std::isnan(static_cast<double>(r.first))) ||
                   (l.first < r.first);
          });
    }
    // k is 1-based, the vector is 0-based.
    t_out[i] = col_vec[k - 1].first;
    t_indices[i] = col_vec[k - 1].second;
  }
}
// Scatters one value per row back into a zero-padded row-major output: for
// each row i, writes input(i,0) into output row i at the column stored in
// indices(i,0) — the inverse of the gather performed by getKthvalue (used
// for the gradient pass; assumes a single selected element per row).
template <typename T, typename Type>
static void kthvalueAssign(const Type& input_height, const Type& input_width,
                           const int& input_dim, const framework::Tensor* input,
                           const framework::Tensor* indices, T* output_data) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    if (input_dim == 1) {
      // 1-D case: a single row, read through flattened views.
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      auto e_indices = framework::EigenVector<Type>::Flatten(*indices);
      output_data[i * input_width + e_indices(0)] = e_input(0);
    } else {
      // N-D case viewed as a matrix: one (value, index) pair per row.
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      auto e_indices =
          framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
      output_data[i * input_width + e_indices(i, 0)] = e_input(i, 0);
    }
  }
}
// CPU kernel for the kthvalue op: along attribute `axis` of input "X", emits
// the k-th smallest element into "Out" and its position along that axis into
// "Indices" (int64). `keepdim` controls whether the reduced axis survives
// with extent 1 in the output shape.
template <typename DeviceContext, typename T>
class KthvalueCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<framework::Tensor>("X");
    auto* output = context.Output<framework::Tensor>("Out");
    auto* indices = context.Output<framework::Tensor>("Indices");
    const auto& in_dims = input->dims();
    // k is 1-based: k == 1 selects the minimum (see k-1 indexing in
    // getKthvalue).
    int k = static_cast<int>(context.Attr<int>("k"));
    bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));
    int axis = static_cast<int>(context.Attr<int>("axis"));
    // Normalize a negative axis to its non-negative equivalent.
    if (axis < 0) axis += in_dims.size();
    // NOTE(review): output_data is fetched before the temporary Resize calls
    // below; this relies on Resize not reallocating the buffer — confirm.
    T* output_data = output->mutable_data<T>(context.GetPlace());
    int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace());
    // Remember the caller-visible shape so it can be restored after the
    // keepdim-style temporary Resize below.
    auto out_dims = output->dims();
    if (axis == in_dims.size() - 1) {
      // Fast path: the reduction axis is already innermost, so the input can
      // be viewed directly as (input_height x input_width) contiguous rows.
      const int64_t& input_height = framework::product(
          framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t& input_width = in_dims[in_dims.size() - 1];
      getKthvalue<T, int64_t>(input_height, input_width, in_dims.size(), input,
                              output_data, indices_data, k);
    } else {
      // General path: build a permutation that swaps `axis` with the last
      // dimension, transpose, reduce along the (now) last axis, then
      // transpose the results back.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(axis);
      if (!keepdim) {
        // Temporarily give output/indices the keepdim-shaped rank so the
        // inverse transpose below is well-defined.
        std::vector<int> tmp_out_shape;
        for (int i = 0; i < axis; i++) {
          tmp_out_shape.emplace_back(in_dims[i]);
        }
        tmp_out_shape.emplace_back(1);
        for (int i = axis + 1; i < in_dims.size(); i++) {
          tmp_out_shape.emplace_back(in_dims[i]);
        }
        framework::DDim tmp_out_dims = framework::make_ddim(tmp_out_shape);
        output->Resize(tmp_out_dims);
        indices->Resize(tmp_out_dims);
      }
      // Shapes of the transposed input and of the transposed (reduced)
      // output; the reduced axis has extent 1.
      framework::DDim trans_dims(in_dims);
      framework::DDim trans_out_dims(in_dims);
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = in_dims[trans[i]];
        trans_out_dims[i] = in_dims[trans[i]];
      }
      trans_out_dims[in_dims.size() - 1] = 1;
      framework::Tensor trans_inp;
      trans_inp.mutable_data<T>(trans_dims, context.GetPlace());
      int ndims = trans.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();
      // Transpose the input so the reduction axis is innermost.
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input,
                                                  &trans_inp, trans);
      const int64_t input_height = framework::product(
          framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
      const int64_t input_width = trans_dims[trans_dims.size() - 1];
      framework::Tensor tmp_out, tmp_indices;
      T* t_out = tmp_out.mutable_data<T>(trans_out_dims, context.GetPlace());
      auto* t_ind =
          tmp_indices.mutable_data<int64_t>(trans_out_dims, context.GetPlace());
      getKthvalue<T, int64_t>(input_height, input_width, in_dims.size(),
                              &trans_inp, t_out, t_ind, k);
      // The permutation is its own inverse (a single swap), so the same
      // `trans` maps the results back to the original layout.
      TransCompute<platform::CPUDeviceContext, int64_t>(
          ndims, dev_context, tmp_indices, indices, trans);
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  output, trans);
      if (!keepdim) {
        // Restore the caller-visible (squeezed) shapes.
        output->Resize(out_dims);
        indices->Resize(out_dims);
      }
    }
  }
};
// CPU kernel for kthvalue's backward pass: X@GRAD is zero everywhere except
// at the positions recorded in "Indices", which receive the corresponding
// elements of Out@GRAD.
template <typename DeviceContext, typename T>
class KthvalueGradCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<framework::Tensor>("X");
    auto* out_grad =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* indices = context.Input<framework::Tensor>("Indices");
    auto* x_grad =
        context.Output<framework::Tensor>(framework::GradVarName("X"));
    int axis = static_cast<int>(context.Attr<int>("axis"));
    bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));
    auto in_dims = x->dims();
    auto out_dims = indices->dims();
    // Normalize a negative axis.
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
    if (!keepdim) {
      // Re-insert the reduced axis (extent 1) so out_grad/indices can be
      // treated uniformly as keepdim-shaped tensors below.
      std::vector<int> tmp_out_shape;
      for (int i = 0; i < axis; i++) {
        tmp_out_shape.emplace_back(out_dims[i]);
      }
      tmp_out_shape.emplace_back(1);
      for (int i = axis + 1; i < in_dims.size(); i++) {
        tmp_out_shape.emplace_back(out_dims[i - 1]);
      }
      out_dims = framework::make_ddim(tmp_out_shape);
    }
    T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
    if (axis == in_dims.size() - 1) {
      // Fast path: gradient axis is innermost; scatter directly.
      const int64_t input_height = framework::product(
          framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];
      // All of X@GRAD starts at zero; kthvalueAssign fills in the few
      // selected positions.
      memset(x_grad_data, 0, x_grad->numel() * sizeof(T));
      if (keepdim) {
        kthvalueAssign(input_height, input_width, in_dims.size(), out_grad,
                       indices, x_grad_data);
      } else {
        // Copy and reshape to the keepdim form before scattering, since the
        // inputs are const and cannot be Resized in place.
        auto& dev_context =
            context.template device_context<platform::CPUDeviceContext>();
        framework::Tensor out_grad_tmp, indices_tmp;
        out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
        indices_tmp.mutable_data<int64_t>(indices->dims(),
                                          dev_context.GetPlace());
        framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
                              &out_grad_tmp);
        framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
                              &indices_tmp);
        out_grad_tmp.Resize(out_dims);
        indices_tmp.Resize(out_dims);
        kthvalueAssign(input_height, input_width, in_dims.size(), &out_grad_tmp,
                       &indices_tmp, x_grad_data);
      }
    } else {
      // General path: permute so `axis` becomes innermost (a single swap with
      // the last dimension), scatter in that layout, then transpose back.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(out_dims.size() - 1);
      for (int i = axis + 1; i < out_dims.size() - 1; i++) {
        trans.emplace_back(i);
      }
      trans.emplace_back(axis);
      framework::DDim trans_dims(out_dims);
      framework::DDim trans_in_dims(in_dims);
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = out_dims[trans[i]];
        trans_in_dims[i] = in_dims[trans[i]];
      }
      // Transposed copies of the upstream gradient and the indices.
      framework::Tensor trans_dO, trans_ind;
      trans_dO.mutable_data<T>(trans_dims, context.GetPlace());
      trans_ind.mutable_data<int64_t>(trans_dims, context.GetPlace());
      int ndims = trans.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();
      if (keepdim) {
        TransCompute<platform::CPUDeviceContext, T>(
            ndims, dev_context, *out_grad, &trans_dO, trans);
        TransCompute<platform::CPUDeviceContext, int64_t>(
            ndims, dev_context, *indices, &trans_ind, trans);
      } else {
        // As above: copy + Resize to keepdim rank before transposing.
        framework::Tensor out_grad_tmp, indices_tmp;
        out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
        indices_tmp.mutable_data<int64_t>(indices->dims(),
                                          dev_context.GetPlace());
        framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
                              &out_grad_tmp);
        framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
                              &indices_tmp);
        out_grad_tmp.Resize(out_dims);
        indices_tmp.Resize(out_dims);
        TransCompute<platform::CPUDeviceContext, T>(
            ndims, dev_context, out_grad_tmp, &trans_dO, trans);
        TransCompute<platform::CPUDeviceContext, int64_t>(
            ndims, dev_context, indices_tmp, &trans_ind, trans);
      }
      const int64_t input_height = framework::product(
          framework::slice_ddim(trans_in_dims, 0, trans_in_dims.size() - 1));
      const int64_t input_width = trans_in_dims[trans_in_dims.size() - 1];
      // Scatter into a zeroed buffer shaped like the transposed input, then
      // transpose that buffer back into X@GRAD.
      framework::Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_in_dims, context.GetPlace());
      memset(t_out, 0, x_grad->numel() * sizeof(T));
      kthvalueAssign<T, int64_t>(input_height, input_width, in_dims.size(),
                                 &trans_dO, &trans_ind, t_out);
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  x_grad, trans);
    }
  }
};
} // namespace operators
} // namespace paddle
|
declare-variant-8.c | /* { dg-do compile { target c } } */
/* { dg-additional-options "-fdump-tree-gimple" } */
void f01 (void);
#pragma omp declare variant (f01) match (user={condition(6 == 7)},implementation={vendor(gnu)})
void f02 (void);
void f03 (void);
#pragma omp declare variant (f03) match (user={condition(6 == 6)},implementation={atomic_default_mem_order(seq_cst)})
void f04 (void);
void f05 (void);
#pragma omp declare variant (f05) match (user={condition(1)},implementation={atomic_default_mem_order(relaxed)})
void f06 (void);
#pragma omp requires atomic_default_mem_order(seq_cst)
void f07 (void);
#pragma omp declare variant (f07) match (construct={parallel,for},device={kind("any")})
void f08 (void);
void f09 (void);
#pragma omp declare variant (f09) match (construct={parallel,for},implementation={vendor("gnu")})
void f10 (void);
void f11 (void);
#pragma omp declare variant (f11) match (construct={parallel,for})
void f12 (void);
void f13 (void);
#pragma omp declare variant (f13) match (construct={parallel,for})
void f14 (void);
#pragma omp declare target to (f13, f14)
void f15 (void);
#pragma omp declare variant (f15) match (implementation={vendor(llvm)})
void f16 (void);
void f17 (void);
#pragma omp declare variant (f17) match (construct={target,parallel})
void f18 (void);
void f19 (void);
#pragma omp declare variant (f19) match (construct={target,parallel})
void f20 (void);
void f21 (void);
#pragma omp declare variant (f21) match (construct={teams,parallel})
void f22 (void);
void f23 (void);
#pragma omp declare variant (f23) match (construct={teams,parallel,for})
void f24 (void);
void f25 (void);
#pragma omp declare variant (f25) match (construct={teams,parallel})
void f26 (void);
void f27 (void);
#pragma omp declare variant (f27) match (construct={teams,parallel,for})
void f28 (void);
void f29 (void);
#pragma omp declare variant (f29) match (implementation={vendor(gnu)})
void f30 (void);
void f31 (void);
#pragma omp declare variant (f31) match (construct={teams,parallel,for})
void f32 (void);
void f33 (void);
#pragma omp declare variant (f33) match (device={kind("any\0any")}) /* { dg-warning "unknown property '.any.000any.' of 'kind' selector" } */
void f34 (void);
void f35 (void);
#pragma omp declare variant (f35) match (implementation={vendor("gnu\0")}) /* { dg-warning "unknown property '.gnu.000.' of 'vendor' selector" } */
void f36 (void);
void
test1 (void)
{
int i;
f02 (); /* { dg-final { scan-tree-dump-times "f02 \\\(\\\);" 1 "gimple" } } */
f04 (); /* { dg-final { scan-tree-dump-times "f03 \\\(\\\);" 1 "gimple" } } */
f06 (); /* { dg-final { scan-tree-dump-times "f06 \\\(\\\);" 1 "gimple" } } */
#pragma omp parallel
#pragma omp for
for (i = 0; i < 1; i++)
f08 (); /* { dg-final { scan-tree-dump-times "f07 \\\(\\\);" 1 "gimple" } } */
#pragma omp parallel for
for (i = 0; i < 1; i++)
f10 (); /* { dg-final { scan-tree-dump-times "f09 \\\(\\\);" 1 "gimple" } } */
#pragma omp for
for (i = 0; i < 1; i++)
#pragma omp parallel
f12 (); /* { dg-final { scan-tree-dump-times "f12 \\\(\\\);" 1 "gimple" } } */
#pragma omp parallel
#pragma omp target
#pragma omp for
for (i = 0; i < 1; i++)
f14 (); /* { dg-final { scan-tree-dump-times "f14 \\\(\\\);" 1 "gimple" } } */
f16 (); /* { dg-final { scan-tree-dump-times "f16 \\\(\\\);" 1 "gimple" } } */
f34 (); /* { dg-final { scan-tree-dump-times "f34 \\\(\\\);" 1 "gimple" } } */
f36 (); /* { dg-final { scan-tree-dump-times "f36 \\\(\\\);" 1 "gimple" } } */
}
/* test2 is inside a declare-target block; the scan directive pins that the
   call inside the parallel region resolves to the f17 variant.  */
#pragma omp declare target
void
test2 (void)
{
  #pragma omp parallel
  f18 ();	/* { dg-final { scan-tree-dump-times "f17 \\\(\\\);" 1 "gimple" } } */
}
#pragma omp end declare target
void test3 (void);
#pragma omp declare target to (test3)
/* test3 is declared target via the clause form ("declare target to"); the
   scan directive pins that f20 stays unresolved here.  */
void
test3 (void)
{
  #pragma omp parallel
  f20 ();	/* { dg-final { scan-tree-dump-times "f20 \\\(\\\);" 1 "gimple" } } */
}
/* f21 is itself a variant implementation (for f22's construct={teams,parallel});
   the call inside its for loop must resolve to f23 per the scan directive.  */
void
f21 (void)
{
  int i;
  #pragma omp for
  for (i = 0; i < 1; i++)
    f24 ();	/* { dg-final { scan-tree-dump-times "f23 \\\(\\\);" 1 "gimple" } } */
}
/* f26 parallels f21, but here the scan directive pins that f28 must stay
   unresolved inside the loop.  */
void
f26 (void)
{
  int i;
  #pragma omp for
  for (i = 0; i < 1; i++)
    f28 ();	/* { dg-final { scan-tree-dump-times "f28 \\\(\\\);" 1 "gimple" } } */
}
/* f29 is the vendor(gnu) variant of f30; per the scan directive the call to
   f32 inside it must stay unresolved.  */
void
f29 (void)
{
  int i;
  #pragma omp for
  for (i = 0; i < 1; i++)
    f32 ();	/* { dg-final { scan-tree-dump-times "f32 \\\(\\\);" 1 "gimple" } } */
}
|
SliceableArrays.h | /*
* MIT License
*
* Copyright (c) 2017 Daniel Politte
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files(the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions :
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <iostream>
#include <cassert>
#include <numeric>
#include <vector>
#include <array>
#ifndef __H_SLICEABLEARRAYS
#define __H_SLICEABLEARRAYS
namespace SliceableArrays {
/*
* Sliceable Arrays
*
* Features:
* - 0-indexed
* - Column-major
* - Arbitrarily high dimensionality, known at compile time
* - Fixed-size
* - Dimensions need not be known until runtime
*
* descended from class described at:
* http://www.cplusplus.com/forum/articles/17108/
*
* AN IMPORTANT DETAIL different from usual C arrays:
* The elements are stored internally in 2nd dimension-major order. That is, a
* 2D array with dimensions n by m has a memory layout of m series n elements,
* in which each series shares a common 2nd index. The first index changes the
* fastest as memory is traversed. This also applies to the higher-dimensioned
* arrays; the first dimension changes the fastest, and the last dimension
* changes most slowly.
*
*/
/*
 * Fixed-size N-dimensional array with column-major storage: the FIRST index
 * varies fastest in memory, the last slowest.  Dimensions are fixed at
 * construction but need not be known until runtime.
 *
 * Fixes relative to the previous revision:
 *  - copy/move assignment were ill-formed: they assigned to const members
 *    (dims_, numEls_) and the move-assignment referenced an undeclared `rhs`;
 *  - copy assignment leaked the old buffer and had no self-assignment guard;
 *  - getFlattenedIndex took a non-const reference but was called with const
 *    vectors from the const indexGeneral overload (ill-formed on use);
 *  - buildEndOfIndex was non-const but called from the const variadic ind()
 *    (ill-formed on use);
 *  - unqualified `accumulate` with an `int` initial value (no ADL for
 *    pointer iterators; element count computed in int);
 *  - setData() set dataAutoDestroy_ to true, contradicting its documented
 *    contract and the adopting constructor (deleting a caller-owned buffer).
 */
template <typename T, size_t NDIMS>
class ArrayND {
private:
    // Mutable internals (previously const, which broke assignment).
    std::array<size_t, NDIMS> dims_;  // extent of each dimension
    size_t numEls_;                   // total element count (product of dims_)
    bool dataAutoDestroy_;            // true => destructor delete[]s arr_
    T *arr_;                          // element storage, first index fastest

    // Flattens an NDIMS-dimensional position into a linear offset
    // (column-major).  Missing trailing indices are treated as zero.
    // Takes the position by const reference and works on a local copy, so
    // it is callable from const contexts and never mutates the caller's
    // vector.
    size_t getFlattenedIndex(const std::vector<size_t>& position) const {
        // make sure we have the proper number of indices
        if (position.size() != NDIMS) {
            std::cerr << "Illegally-sized index into ArrayND!" << std::endl;
        }
        std::vector<size_t> pos(position);
        pos.resize(NDIMS, 0); // tack on additional zeros as necessary
        // If we're debugging, check validity of each position item
        // (i.e., less than its dimension's size)
        assert(indexValid(pos, dims_));
        size_t index = 0;
        for (size_t i = dims_.size() - 1; i > 0; --i) {
            index += pos[i];
            index *= dims_[i - 1];
        }
        index += pos[0];
        return index;
    }
    // Terminal case of the recursive offset computation below.
    size_t buildEndOfIndex(size_t posThis) const {
        return posThis;
    }
    // Horner-style accumulation of the linear offset from a parameter pack of
    // indices; equivalent to getFlattenedIndex on the full position list.
    // (const so it can serve the const variadic ind() overload.)
    template<typename... Args>
    size_t buildEndOfIndex(size_t posThis, Args... posRest) const {
        const size_t correspondingDimensionIndex = NDIMS - (1 + sizeof...(posRest));
        size_t tailResult = buildEndOfIndex(posRest...);
        return posThis + (dims_[correspondingDimensionIndex] * tailResult);
    }
    // True iff every coordinate is strictly below its dimension's extent.
    static bool indexValid(const std::vector<size_t>& pos, const std::array<size_t, NDIMS>& dims) {
        for (size_t i = 0; i < pos.size(); ++i) {
            if (pos[i] >= dims[i]) {
                return false;
            }
        }
        return true;
    }
    // Terminal case: stores the final argument in the last slot.
    static void compactArgsToArrayInner(std::array<size_t, NDIMS>& resultArray, size_t only) {
        resultArray[NDIMS - 1] = only;
    }
    // Recursive case: stores `first` at its positional slot, recurses on the rest.
    template<typename... Args>
    static void compactArgsToArrayInner(std::array<size_t, NDIMS>& resultArray, size_t first, Args... vals) {
        resultArray[NDIMS - (sizeof...(vals) + 1)] = first; // +1: `first` is not counted among the pack
        compactArgsToArrayInner(resultArray, vals...);
    }
    // Packs exactly NDIMS size arguments into a std::array.
    template<typename... Args>
    static std::array<size_t, NDIMS> compactArgsToArray(Args... vals) {
        static_assert(sizeof...(vals) == NDIMS, "Requires NDIMS arguments exactly");
        std::array<size_t, NDIMS> resultArray;
        compactArgsToArrayInner(resultArray, vals...);
        return resultArray;
    }
    // Allocates space on the heap for this array's elements.
    void initializeData() {
        if (numEls_ > 0) {
            arr_ = new T[numEls_];
        }
    }
    // Deallocates the element buffer, but only if this instance owns it.
    void destroyDataIfNecessary() {
        if (dataAutoDestroy_) {
            delete[] arr_;
        }
    }
public:
    /*
     * Constructors & destructor: ArrayND manages the space allocated (or
     * adopted) for the array's data.
     */
    // Allocating constructor: space is allocated automatically and owned
    // (destroyed in the destructor).
    template<typename... Args>
    ArrayND(size_t dim1, Args... remainingDims)
        : dims_(compactArgsToArray(dim1, remainingDims...)),
          // size_t initial value keeps the product in size_t (no int overflow)
          numEls_(std::accumulate(dims_.begin(), dims_.end(), static_cast<size_t>(1),
                                  [](size_t a, size_t b) -> size_t { return a * b; })),
          dataAutoDestroy_(true),
          arr_(nullptr)
    {
        static_assert(NDIMS == sizeof...(remainingDims) + 1, "ArrayND constructor requires exactly as many size arguments as the array has dimensions");
        initializeData();
    }
    // Adopting constructor: the caller's buffer is used and NOT destroyed.
    template<typename... Args>
    ArrayND(T* dataPtr, size_t dim1, Args... remainingDims)
        : dims_(compactArgsToArray(dim1, remainingDims...)),
          numEls_(std::accumulate(dims_.begin(), dims_.end(), static_cast<size_t>(1),
                                  [](size_t a, size_t b) -> size_t { return a * b; })),
          dataAutoDestroy_(false),
          arr_(dataPtr)
    {
        static_assert(NDIMS == sizeof...(remainingDims) + 1, "ArrayND constructor requires exactly as many size arguments as the array has dimensions");
    }
    // Copy constructor: deep copy of the data (const-correct; the copy always
    // owns its own allocation).
    ArrayND(const ArrayND& other)
        : dims_(other.dims_),
          numEls_(other.numEls_),
          dataAutoDestroy_(true),
          arr_(nullptr)
    {
        initializeData();
        for (size_t i = 0; i < numEls_; ++i) {
            arr_[i] = other.arr_[i];
        }
    }
    // Copy assignment: deep copy.  Releases any previously owned buffer and
    // guards against self-assignment (the old version leaked and assigned to
    // const members).
    ArrayND& operator=(const ArrayND& other) {
        if (this != &other) {
            destroyDataIfNecessary();
            dims_ = other.dims_;
            numEls_ = other.numEls_;
            dataAutoDestroy_ = true; // this instance owns the fresh allocation
            arr_ = nullptr;
            initializeData();
            for (size_t i = 0; i < numEls_; ++i) {
                arr_[i] = other.arr_[i];
            }
        }
        return *this;
    }
    // Move constructor: steals the buffer and leaves the source safely
    // destructible.
    ArrayND(ArrayND&& other) noexcept
        : dims_(other.dims_),
          numEls_(other.numEls_),
          dataAutoDestroy_(other.dataAutoDestroy_),
          arr_(other.arr_)
    {
        other.arr_ = nullptr;
    }
    // Move assignment (fixed: referenced an undeclared `rhs` before, and
    // could not compile against the then-const members).
    ArrayND& operator=(ArrayND&& other) noexcept {
        if (this != &other) {
            destroyDataIfNecessary(); // flush data that was at new location, if any
            dims_ = other.dims_;
            numEls_ = other.numEls_;
            dataAutoDestroy_ = other.dataAutoDestroy_;
            arr_ = other.arr_;
            other.arr_ = nullptr; // source is now safe to destroy
        }
        return *this;
    }
    // Destructor: frees the buffer only when this instance owns it.
    ~ArrayND()
    {
        destroyDataIfNecessary();
    }
    // Total number of elements.
    size_t numEls() const { return numEls_; }
    // Number of elements along the nth dimension (0-based).
    size_t getDim(size_t n) const {
        assert(n < dims_.size());
        return dims_[n];
    }
    // 1-D (flat) indexing, common to all dimensionalities.
    T ind(size_t i) const {
        assert(i < numEls_);
        return arr_[i];
    }
    T& ind(size_t i) {
        assert(i < numEls_);
        return arr_[i];
    }
    T operator[](size_t i) const {
        return ind(i);
    }
    T& operator[](size_t i) {
        return ind(i);
    }
    // Dimension-specific multi-index accessors; the static_asserts restrict
    // each overload to the matching NDIMS.
    T ind(size_t p0, size_t p1) const {
        static_assert(NDIMS == 2, "Only 2D arrays can be indexed with 2 dimensions");
        return ind(getOffsetAtIndex(p0, p1));
    }
    T& ind(size_t p0, size_t p1) {
        static_assert(NDIMS == 2, "Only 2D arrays can be indexed with 2 dimensions");
        return ind(getOffsetAtIndex(p0, p1));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1) const {
        static_assert(NDIMS == 2, "Only 2D arrays can be indexed with 2 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        assert(p0 < m && p1 < n);
        return p1 * m + p0;
    }
    T ind(size_t p0, size_t p1, size_t p2) const {
        static_assert(NDIMS == 3, "Only 3D arrays can be indexed with 3 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2));
    }
    T& ind(size_t p0, size_t p1, size_t p2) {
        static_assert(NDIMS == 3, "Only 3D arrays can be indexed with 3 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2) const {
        static_assert(NDIMS == 3, "Only 3D arrays can be indexed with 3 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        assert(p0 < m && p1 < n && p2 < p);
        return (p2 * n + p1) * m + p0;
    }
    T ind(size_t p0, size_t p1, size_t p2, size_t p3) const {
        static_assert(NDIMS == 4, "Only 4D arrays can be indexed with 4 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3));
    }
    T& ind(size_t p0, size_t p1, size_t p2, size_t p3) {
        static_assert(NDIMS == 4, "Only 4D arrays can be indexed with 4 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2, size_t p3) const {
        static_assert(NDIMS == 4, "Only 4D arrays can be indexed with 4 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        size_t q = getDim(3);
        assert(p0 < m && p1 < n && p2 < p && p3 < q);
        return ((p3 * p + p2) * n + p1) * m + p0;
    }
    T ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4) const {
        static_assert(NDIMS == 5, "Only 5D arrays can be indexed with 5 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4));
    }
    T& ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4) {
        static_assert(NDIMS == 5, "Only 5D arrays can be indexed with 5 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4) const {
        static_assert(NDIMS == 5, "Only 5D arrays can be indexed with 5 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        size_t q = getDim(3);
        size_t r = getDim(4);
        assert(p0 < m && p1 < n && p2 < p && p3 < q && p4 < r);
        return (((p4 * q + p3) * p + p2) * n + p1) * m + p0;
    }
    T ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5) const {
        static_assert(NDIMS == 6, "Only 6D arrays can be indexed with 6 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4, p5));
    }
    T& ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5) {
        static_assert(NDIMS == 6, "Only 6D arrays can be indexed with 6 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4, p5));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5) const {
        static_assert(NDIMS == 6, "Only 6D arrays can be indexed with 6 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        size_t q = getDim(3);
        size_t r = getDim(4);
        size_t s = getDim(5);
        assert(p0 < m && p1 < n && p2 < p && p3 < q && p4 < r && p5 < s);
        return ((((p5 * r + p4) * q + p3) * p + p2) * n + p1) * m + p0;
    }
    T ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5, size_t p6) const {
        static_assert(NDIMS == 7, "Only 7D arrays can be indexed with 7 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4, p5, p6));
    }
    T& ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5, size_t p6) {
        static_assert(NDIMS == 7, "Only 7D arrays can be indexed with 7 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4, p5, p6));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5, size_t p6) const {
        static_assert(NDIMS == 7, "Only 7D arrays can be indexed with 7 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        size_t q = getDim(3);
        size_t r = getDim(4);
        size_t s = getDim(5);
        size_t t = getDim(6);
        assert(p0 < m && p1 < n && p2 < p && p3 < q && p4 < r && p5 < s && p6 < t);
        return (((((p6 * s + p5) * r + p4) * q + p3) * p + p2) * n + p1) * m + p0;
    }
    // Slow but fully general indexing from a runtime position vector (now
    // usable from const contexts; the caller's vector is never modified).
    T indexGeneral(const std::vector<size_t>& position) const {
        size_t index = getFlattenedIndex(position);
        return ind(index);
    }
    T& indexGeneral(const std::vector<size_t>& position) {
        size_t index = getFlattenedIndex(position);
        return ind(index);
    }
    // Generic variadic indexing for arbitrarily high dimensions; equivalent
    // to getFlattenedIndex on the full position list.
    template<typename... Args>
    T ind(size_t posFirst, Args... posRest) const {
        size_t offset = buildEndOfIndex(posFirst, posRest...);
        return ind(offset);
    }
    template<typename... Args>
    T& ind(size_t posFirst, Args... posRest) {
        size_t offset = buildEndOfIndex(posFirst, posRest...);
        return ind(offset);
    }
    // Resets every element to `val`.
    void fill(T val) {
#pragma omp parallel for
        for (int i_tmp = 0; i_tmp < (int)numEls_; ++i_tmp) {
            size_t i = i_tmp; // signed loop variable for OpenMP 2.0 compatibility
            arr_[i] = val;
        }
    }
    // Retrieves the location of the internal data.
    T* getData() const {
        return arr_;
    }
    // Adopts a new data pointer.  The adopted buffer is NOT auto-destroyed
    // (matching this method's documented contract and the adopting
    // constructor; the old code wrongly set the flag to true).  NOTE: any
    // previously owned buffer is not freed here — callers that replace an
    // owned buffer must free it themselves (via getData() first).
    void setData(T *data) {
        arr_ = data;
        dataAutoDestroy_ = false;
    }
    // Chooses whether the internal data is deleted on destruction.
    void setDataAutoDestroy(bool isDataForfeit) {
        dataAutoDestroy_ = isDataForfeit;
    }
    bool getDataAutoDestroy() {
        return dataAutoDestroy_;
    }
};
// Convenience aliases for the common low-dimensional cases.
template <typename T> using Array1D = ArrayND<T, 1>;
template <typename T> using Array2D = ArrayND<T, 2>;
template <typename T> using Array3D = ArrayND<T, 3>;
template <typename T> using Array4D = ArrayND<T, 4>;
template <typename T> using Array5D = ArrayND<T, 5>;
template <typename T> using Array6D = ArrayND<T, 6>;
template <typename T> using Array7D = ArrayND<T, 7>;
};
#endif /* __H_SLICEABLEARRAYS */
|
caesar_encode.c | #include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <strings.h>
#include <time.h>
#include <omp.h>
#include "mpi.h"
#define CHARS 100000
// caesar encode paralelizado
/*
 * Caesar-cipher encoder parallelized with MPI + OpenMP: rank 0 reads the
 * plaintext, equal chunks are scattered to all ranks, each rank shifts its
 * chunk by `llave` in an OpenMP loop, and rank 0 gathers and writes the
 * result.
 */
int main(int argc, char * argv[]) {
    /* MPI bookkeeping */
    int myid, numprocs;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    int data = CHARS / numprocs;   /* characters handled by each rank */
    char local[data];              /* this rank's chunk (C99 VLA) */
    double start, stop;
    /* aqui es donde cambiamos la llave para encriptar */
    int llave = 3;
    char mensaje[CHARS];

    if (myid == 0) {
        /* Only rank 0 touches the input file. */
        const char *path = "texto_largo.txt";
        FILE *in_file = fopen(path, "r");
        if (in_file == NULL) {
            printf("ERROR, no se pudo abrir el archivo txt\n");
            exit(-1);
        }
        /* Read up to CHARS-1 bytes and NUL-terminate, so the buffer is
         * always a valid C string (the old fread left it unterminated and
         * ignored short reads). */
        size_t nread = fread(mensaje, 1, CHARS - 1, in_file);
        mensaje[nread] = '\0';
        fclose(in_file);
        /* Printed only on rank 0: on other ranks `mensaje` is uninitialized
         * and printing it was undefined behavior. */
        printf("Mensaje original: %s\n", mensaje);
    }

    /* Split the message evenly across ranks. */
    MPI_Scatter(mensaje, data, MPI_CHAR, local, data, MPI_CHAR, 0, MPI_COMM_WORLD);

    start = omp_get_wtime();
    /* Encrypt this rank's chunk in parallel.  `i` and `temp` are declared
     * inside the loop so every thread has private copies — the previous
     * shared `temp` was a data race that could corrupt the output. */
    #pragma omp parallel for
    for (int i = 0; i < data; i++) {
        char temp = local[i];
        if (temp >= 'a' && temp <= 'z') {
            temp = temp + llave;
            if (temp > 'z') {
                temp = temp - 'z' + 'a' - 1;   /* wrap past 'z' */
            }
            if (temp < 'a') {
                temp = temp - 'a' + 'z' + 1;   /* wrap below 'a' (negative key) */
            }
            local[i] = temp;
        } else if (temp >= 'A' && temp <= 'Z') {
            temp = temp + llave;
            if (temp > 'Z') {
                temp = temp - 'Z' + 'A' - 1;
            }
            if (temp < 'A') {
                temp = temp - 'A' + 'Z' + 1;
            }
            local[i] = temp;
        }
    }

    /* Reassemble the encrypted message on rank 0. */
    MPI_Gather(local, data, MPI_CHAR, mensaje, data, MPI_CHAR, 0, MPI_COMM_WORLD);

    if (myid == 0) {
        printf("Mensaje encriptado: %s\n", mensaje);
        FILE *out_file = fopen("texto_encriptado.txt", "w");
        if (out_file != NULL) {
            fprintf(out_file, "%s", mensaje);
            fclose(out_file);
        }
        stop = omp_get_wtime();
        printf("Tiempo de ejecución de la sección paralela = %f \n", stop - start);
    }

    MPI_Finalize();
    return 0;
}
sections.c | #include<stdio.h>
#include<omp.h>
/* Report that section A executed, tagged with the calling thread's id. */
void section_a(int id)
{
    fprintf(stdout, "Section A %d\n", id);
}
/* Report that section B executed, tagged with the calling thread's id. */
void section_b(int id)
{
    fprintf(stdout, "Section B %d\n", id);
}
/* Report that section C executed, tagged with the calling thread's id. */
void section_c(int id)
{
    fprintf(stdout, "Section C %d\n", id);
}
/*
 * Demonstrates OpenMP sections: the three sections are distributed among the
 * threads of the parallel team (each runs exactly once on some thread), then
 * every thread reports its own id.
 */
int main(){
    #pragma omp parallel
    {
        #pragma omp sections
        {
            #pragma omp section
            section_a(omp_get_thread_num());
            #pragma omp section
            section_b(omp_get_thread_num());
            #pragma omp section
            section_c(omp_get_thread_num());
        }
        /* Declared inside the parallel region so each thread gets a private
         * copy.  The previous version wrote a single shared `id` from every
         * thread concurrently — a data race that could make a thread print
         * another thread's id. */
        int id = omp_get_thread_num();
        printf("Parallel block thread %d.\n", id);
    }
    return 0;
}
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
/* Per-transform bookkeeping passed between the DFT helper routines. */
typedef struct _FourierInfo
{
  ChannelType
    channel;      /* image channel this pass operates on — presumably one
                     channel per FourierInfo; confirm against the callers */

  MagickBooleanType
    modulus;      /* NOTE(review): looks like it selects magnitude/phase vs.
                     real/imaginary output — confirm where it is set */

  size_t
    width,
    height;       /* dimensions of the transform — assumed to be the (possibly
                     padded) image size; TODO confirm */

  ssize_t
    center;       /* assumed index of the spectrum center/DC term — confirm */
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,const ComplexOperator op,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
  ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"

  CacheView
    *Ai_view,
    *Ar_view,
    *Bi_view,
    *Br_view,
    *Ci_view,
    *Cr_view;

  const char
    *artifact;

  const Image
    *Ai_image,
    *Ar_image,
    *Bi_image,
    *Br_image;

  double
    snr;

  Image
    *Ci_image,
    *complex_images,
    *Cr_image,
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    ComplexImages() applies the complex operator 'op' to an image sequence:
    operand A is the first real/imaginary image pair, operand B the second
    pair (or A again when only two images are supplied).  The result is a
    new two-image list holding the real and imaginary components.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (images->next == (Image *) NULL)
    {
      /*
        Complex arithmetic needs at least a real/imaginary pair.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",images->filename);
      return((Image *) NULL);
    }
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  complex_images=NewImageList();
  AppendImageToList(&complex_images,image);
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    {
      complex_images=DestroyImageList(complex_images);
      return(complex_images);
    }
  AppendImageToList(&complex_images,image);
  /*
    Apply complex mathematics to image pixels.
  */
  artifact=GetImageArtifact(image,"complex:snr");
  snr=0.0;
  if (artifact != (const char *) NULL)
    snr=StringToDouble(artifact,(char **) NULL);
  /*
    A = first pair; B defaults to A unless a second pair is supplied.
  */
  Ar_image=images;
  Ai_image=images->next;
  Br_image=images;
  Bi_image=images->next;
  if ((images->next->next != (Image *) NULL) &&
      (images->next->next->next != (Image *) NULL))
    {
      Br_image=images->next->next;
      Bi_image=images->next->next->next;
    }
  Cr_image=complex_images;
  Ci_image=complex_images->next;
  Ar_view=AcquireVirtualCacheView(Ar_image,exception);
  Ai_view=AcquireVirtualCacheView(Ai_image,exception);
  Br_view=AcquireVirtualCacheView(Br_image,exception);
  Bi_view=AcquireVirtualCacheView(Bi_image,exception);
  Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
  Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(Cr_image,complex_images,Cr_image->rows,1L)
#endif
  for (y=0; y < (ssize_t) Cr_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict Ai,
      *magick_restrict Ar,
      *magick_restrict Bi,
      *magick_restrict Br;

    register PixelPacket
      *magick_restrict Ci,
      *magick_restrict Cr;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Cr_image->columns,1,exception);
    Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Cr_image->columns,1,exception);
    Br=GetCacheViewVirtualPixels(Br_view,0,y,Cr_image->columns,1,exception);
    Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Cr_image->columns,1,exception);
    Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception);
    Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception);
    if ((Ar == (const PixelPacket *) NULL) ||
        (Ai == (const PixelPacket *) NULL) ||
        (Br == (const PixelPacket *) NULL) ||
        (Bi == (const PixelPacket *) NULL) ||
        (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) Cr_image->columns; x++)
    {
      switch (op)
      {
        case AddComplexOperator:
        {
          Cr->red=Ar->red+Br->red;
          Ci->red=Ai->red+Bi->red;
          Cr->green=Ar->green+Br->green;
          Ci->green=Ai->green+Bi->green;
          Cr->blue=Ar->blue+Br->blue;
          Ci->blue=Ai->blue+Bi->blue;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity+Br->opacity;
              Ci->opacity=Ai->opacity+Bi->opacity;
            }
          break;
        }
        case ConjugateComplexOperator:
        default:
        {
          /*
            NOTE(review): the imaginary part is taken from operand B; for a
            two-image sequence Ai == Bi so this is conj(A), but with four
            images the conjugate mixes Ar with -Bi — confirm this is the
            intended semantics.
          */
          Cr->red=Ar->red;
          Ci->red=(-Bi->red);
          Cr->green=Ar->green;
          Ci->green=(-Bi->green);
          Cr->blue=Ar->blue;
          Ci->blue=(-Bi->blue);
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity;
              Ci->opacity=(-Bi->opacity);
            }
          break;
        }
        case DivideComplexOperator:
        {
          double
            gamma;

          /*
            C = A/B = A*conj(B)/|B|^2; 'snr' regularizes the denominator.
            The (double) casts prevent Quantum*Quantum from overflowing
            integer arithmetic on Q16+ non-HDRI builds.
          */
          gamma=PerceptibleReciprocal((double) Br->red*Br->red+(double)
            Bi->red*Bi->red+snr);
          Cr->red=gamma*((double) Ar->red*Br->red+(double) Ai->red*Bi->red);
          Ci->red=gamma*((double) Ai->red*Br->red-(double) Ar->red*Bi->red);
          gamma=PerceptibleReciprocal((double) Br->green*Br->green+(double)
            Bi->green*Bi->green+snr);
          Cr->green=gamma*((double) Ar->green*Br->green+(double)
            Ai->green*Bi->green);
          Ci->green=gamma*((double) Ai->green*Br->green-(double)
            Ar->green*Bi->green);
          gamma=PerceptibleReciprocal((double) Br->blue*Br->blue+(double)
            Bi->blue*Bi->blue+snr);
          Cr->blue=gamma*((double) Ar->blue*Br->blue+(double)
            Ai->blue*Bi->blue);
          Ci->blue=gamma*((double) Ai->blue*Br->blue-(double)
            Ar->blue*Bi->blue);
          if (images->matte != MagickFalse)
            {
              gamma=PerceptibleReciprocal((double) Br->opacity*Br->opacity+
                (double) Bi->opacity*Bi->opacity+snr);
              Cr->opacity=gamma*((double) Ar->opacity*Br->opacity+(double)
                Ai->opacity*Bi->opacity);
              Ci->opacity=gamma*((double) Ai->opacity*Br->opacity-(double)
                Ar->opacity*Bi->opacity);
            }
          break;
        }
        case MagnitudePhaseComplexOperator:
        {
          /*
            Phase is mapped from [-pi,pi] into [0,1] so it fits the quantum
            range.
          */
          Cr->red=sqrt((double) Ar->red*Ar->red+(double) Ai->red*Ai->red);
          Ci->red=atan2((double) Ai->red,(double) Ar->red)/(2.0*MagickPI)+0.5;
          Cr->green=sqrt((double) Ar->green*Ar->green+(double)
            Ai->green*Ai->green);
          Ci->green=atan2((double) Ai->green,(double) Ar->green)/
            (2.0*MagickPI)+0.5;
          Cr->blue=sqrt((double) Ar->blue*Ar->blue+(double) Ai->blue*Ai->blue);
          Ci->blue=atan2((double) Ai->blue,(double) Ar->blue)/
            (2.0*MagickPI)+0.5;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=sqrt((double) Ar->opacity*Ar->opacity+(double)
                Ai->opacity*Ai->opacity);
              Ci->opacity=atan2((double) Ai->opacity,(double) Ar->opacity)/
                (2.0*MagickPI)+0.5;
            }
          break;
        }
        case MultiplyComplexOperator:
        {
          /*
            C = A*B; QuantumScale keeps the product within quantum range.
          */
          Cr->red=QuantumScale*((double) Ar->red*Br->red-(double)
            Ai->red*Bi->red);
          Ci->red=QuantumScale*((double) Ai->red*Br->red+(double)
            Ar->red*Bi->red);
          Cr->green=QuantumScale*((double) Ar->green*Br->green-(double)
            Ai->green*Bi->green);
          Ci->green=QuantumScale*((double) Ai->green*Br->green+(double)
            Ar->green*Bi->green);
          Cr->blue=QuantumScale*((double) Ar->blue*Br->blue-(double)
            Ai->blue*Bi->blue);
          Ci->blue=QuantumScale*((double) Ai->blue*Br->blue+(double)
            Ar->blue*Bi->blue);
          if (images->matte != MagickFalse)
            {
              Cr->opacity=QuantumScale*((double) Ar->opacity*Br->opacity-
                (double) Ai->opacity*Bi->opacity);
              Ci->opacity=QuantumScale*((double) Ai->opacity*Br->opacity+
                (double) Ar->opacity*Bi->opacity);
            }
          break;
        }
        case RealImaginaryComplexOperator:
        {
          /*
            Inverse of MagnitudePhase: A holds magnitude (real) and phase in
            [0,1] (imaginary); recover real/imaginary components.
          */
          Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5));
          Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5));
          Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5));
          Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5));
          Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5));
          Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5));
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5));
              Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5));
            }
          break;
        }
        case SubtractComplexOperator:
        {
          Cr->red=Ar->red-Br->red;
          Ci->red=Ai->red-Bi->red;
          Cr->green=Ar->green-Br->green;
          Ci->green=Ai->green-Bi->green;
          Cr->blue=Ar->blue-Br->blue;
          Ci->blue=Ai->blue-Bi->blue;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity-Br->opacity;
              Ci->opacity=Ai->opacity-Bi->opacity;
            }
          break;
        }
      }
      Ar++;
      Ai++;
      Br++;
      Bi++;
      Cr++;
      Ci++;
    }
    if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
%  The format of the ForwardFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o modulus: if true, return the transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *shifted_pixels;

  MemoryInfo
    *shift_info;

  register ssize_t
    column,
    source;

  ssize_t
    row;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2):
    copy every pixel to its cyclically-shifted destination in a scratch
    buffer, then copy the scratch buffer back over the input.  Offsets are
    assumed to satisfy |offset| < dimension, as all callers guarantee.
  */
  shift_info=AcquireVirtualMemory(width,height*sizeof(*shifted_pixels));
  if (shift_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  shifted_pixels=(double *) GetVirtualMemoryBlob(shift_info);
  source=0L;
  for (row=0L; row < (ssize_t) height; row++)
  {
    ssize_t
      v;

    v=(row+y_offset+(ssize_t) height) % (ssize_t) height;
    for (column=0L; column < (ssize_t) width; column++)
    {
      ssize_t
        u;

      u=(column+x_offset+(ssize_t) width) % (ssize_t) width;
      shifted_pixels[v*(ssize_t) width+u]=roll_pixels[source++];
    }
  }
  (void) memcpy(roll_pixels,shifted_pixels,height*width*
    sizeof(*shifted_pixels));
  shift_info=RelinquishVirtualMemory(shift_info);
  return(MagickTrue);
}
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.

    Expands FFTW's half-spectrum r2c layout ('source_pixels', width/2+1
    complex columns per row) into a full width-by-height spectrum
    ('forward_pixels') with the DC term centered.  The right half is copied
    directly after rolling rows by height/2; the left half is reconstructed
    from the conjugate-symmetric mirror of the stored half.
  */
  center=(ssize_t) (width/2L)+1L;
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Copy stored half-spectrum into the right half of the output.
  */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  /*
    Mirror rows 1..height-1 into the left half (row 0 handled below, since
    its mirror is itself).
  */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  /*
    Mirror the first row into its own left half.
  */
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
  return(MagickTrue);
}
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  ssize_t
    u,
    v;

  /*
    Negate every value in the left half (x < width/2) of each row; this
    corrects the phase sign on the mirrored half of the spectrum.
  */
  for (v=0; v < (ssize_t) height; v++)
  {
    double
      *row;

    row=fourier_pixels+v*(ssize_t) width;
    for (u=0; u < (ssize_t) (width/2L); u++)
      row[u]=(-row[u]);
  }
}
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_pixels,
    *phase_pixels;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    x;

  ssize_t
    i,
    y;

  /*
    Write the half-spectrum 'magnitude'/'phase' buffers, for one channel,
    into the two-image list 'image' (first image: magnitude or real; second:
    phase or imaginary), after expanding to a full centered spectrum.
  */
  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) memset(magnitude_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) memset(phase_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*phase_pixels));
  /*
    Expand half-spectra to full centered spectra and fix the phase sign on
    the mirrored (left) half.
  */
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      /*
        Map phase from [-pi,pi] into [0,1] for quantum storage.
      */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  /*
    Write magnitude (or real) values into the selected channel of the first
    output image.
  */
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /*
    Write phase (or imaginary) values into the second output image.
  */
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_complex
    *forward_pixels;

  fftw_plan
    fftw_r2c_plan;

  MemoryInfo
    *forward_info,
    *source_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Generate the forward Fourier transform.

    One channel of 'image' is unrolled (QuantumScale-normalized, zero-padded
    to fourier_info->width x height) into a double array, transformed with
    FFTW's real-to-complex plan, optionally normalized, then split into
    half-spectrum magnitude/phase (or real/imaginary) buffers.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /*
    Zero-fill so pixels beyond the image bounds act as padding.
  */
  memset(source_pixels,0,fourier_info->width*fourier_info->height*
    sizeof(*source_pixels));
  i=0L;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          source_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          source_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    r2c output is the half spectrum: width x (height/2+1) complex values.
  */
  forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*forward_pixels));
  if (forward_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
      return(MagickFalse);
    }
  forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
  /*
    FFTW's planner is not thread-safe, so plan creation is serialized;
    fftw_execute_* is safe to run concurrently.  NOTE(review):
    fftw_destroy_plan is also a planner call yet runs outside the critical
    section — confirm this is safe for concurrent callers.  The plan is
    created with (width,height); callers force width == height so the
    (rows,columns) argument order is moot here.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
    source_pixels,forward_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
  fftw_destroy_plan(fftw_r2c_plan);
  source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
    {
      double
        gamma;

      /*
        Normalize Fourier transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  /*
    Forward-transform one channel of 'image' into the two-image list
    'fourier_image'.  The transform domain is forced to be square with an
    even dimension (the larger image extent, rounded up to even); the image
    is zero-padded to that size by ForwardFourierTransform().
  */
  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      size_t extent=image->columns < image->rows ? image->rows : image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  /*
    Scratch buffers sized for FFTW's half spectrum: width x (height/2+1).
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      /*
        Fix: this test was inverted (==), which both called
        RelinquishVirtualMemory() on NULL and leaked a live allocation when
        only the phase allocation failed.
      */
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif
MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  /*
    Without the FFTW delegate only a warning is raised; an empty list is
    returned.
  */
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      height,
      width;

    /*
      Compute the transform extent: square, even, at least as large as the
      image (mirrors ForwardFourierTransformChannel).
    */
    width=image->columns;
    height=image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        size_t extent=image->columns < image->rows ? image->rows :
          image->columns;
        width=(extent & 0x01) == 1 ? extent+1UL : extent;
      }
    height=width;
    magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
    if (magnitude_image != (Image *) NULL)
      {
        Image
          *phase_image;

        magnitude_image->storage_class=DirectClass;
        magnitude_image->depth=32UL;
        phase_image=CloneImage(image,width,height,MagickTrue,exception);
        if (phase_image == (Image *) NULL)
          magnitude_image=DestroyImage(magnitude_image);
        else
          {
            MagickBooleanType
              is_gray,
              status;

            phase_image->storage_class=DirectClass;
            phase_image->depth=32UL;
            AppendImageToList(&fourier_image,magnitude_image);
            AppendImageToList(&fourier_image,phase_image);
            status=MagickTrue;
            is_gray=IsGrayImage(image,exception);
            /*
              Transform each channel in its own OpenMP section; gray images
              need only one transform.  Each section writes a distinct
              channel of the shared output pair.
            */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel sections
#endif
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                if (is_gray != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GrayChannels,modulus,fourier_image,exception);
                else
                  thread_status=ForwardFourierTransformChannel(image,RedChannel,
                    modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GreenChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    BlueChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->matte != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    OpacityChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->colorspace == CMYKColorspace)
                  thread_status=ForwardFourierTransformChannel(image,
                    IndexChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
            }
            if (status == MagickFalse)
              fourier_image=DestroyImageList(fourier_image);
            fftw_cleanup();
          }
      }
  }
#endif
  return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.

    Inverse of ForwardQuadrantSwap(): collapse a full width-by-height
    centered spectrum ('source') back into FFTW's half-spectrum layout
    ('destination', width/2+1 complex columns per row), then roll rows by
    -height/2 to return the DC term to (0,0).
  */
  center=(ssize_t) (width/2L)+1L;
  /*
    Rows 1..height-1: un-mirror the stored half from the full spectrum.
  */
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[(height-y)*center-x+width/2L]=source[y*width+x];
  /*
    First column of each half-spectrum row comes from the spectrum center
    column; first row is its own mirror.
  */
  for (y=0L; y < (ssize_t) height; y++)
    destination[y*center]=source[y*width+width/2L];
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,
  fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *inverse_pixels,
    *magnitude_pixels,
    *phase_pixels;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info,
    *magnitude_info,
    *phase_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse Fourier - read image and break down into a double array.

    Reads one channel from the magnitude/phase (or real/imaginary) image
    pair, undoes the quadrant centering and phase conventions applied by the
    forward transform, and packs the result as complex half-spectrum values
    into 'fourier_pixels' ready for FFTW's c2r transform.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*inverse_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL) ||
      (inverse_info == (MemoryInfo *) NULL))
    {
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (inverse_info != (MemoryInfo *) NULL)
        inverse_info=RelinquishVirtualMemory(inverse_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
  /*
    Unroll the selected channel of the magnitude (or real) image.
  */
  i=0L;
  magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /* NOTE(review): authentic index queue on a virtual view — confirm this
       should not be GetCacheViewVirtualIndexQueue(). */
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /*
    Collapse the centered magnitude spectrum back to half-spectrum layout.
  */
  status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude_pixels,inverse_pixels);
  (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*magnitude_pixels));
  /*
    Unroll the selected channel of the phase (or imaginary) image.
  */
  i=0L;
  phase_view=AcquireVirtualCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          phase_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          phase_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  if (fourier_info->modulus != MagickFalse)
    {
      /*
        Undo the forward transform's phase mapping: [0,1] back to [-pi,pi].
      */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]-=0.5;
          phase_pixels[i]*=(2.0*MagickPI);
          i++;
        }
    }
  phase_view=DestroyCacheView(phase_view);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (status != MagickFalse)
    status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
      phase_pixels,inverse_pixels);
  (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*phase_pixels));
  inverse_info=RelinquishVirtualMemory(inverse_info);
  /*
    Merge two sets.
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
          magnitude_pixels[i]*sin(phase_pixels[i]);
#else
        fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
        fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
        fourier_pixels[i][0]=magnitude_pixels[i];
        fourier_pixels[i][1]=phase_pixels[i];
#endif
        i++;
      }
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  phase_info=RelinquishVirtualMemory(phase_info);
  return(status);
}
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
  fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    *source_pixels;

  const char
    *value;

  fftw_plan
    fftw_c2r_plan;

  MemoryInfo
    *source_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Run FFTW's complex-to-real transform on the merged half-spectrum and
    write the spatial-domain result into the selected channel of 'image',
    cropping the (possibly padded) transform domain to the image bounds.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /*
    Normalization happens here only when explicitly requested; the default
    (NULL artifact) normalizes in the forward transform instead, so scaling
    is applied exactly once.  NOTE(review): relies on LocaleCompare()
    tolerating a NULL argument — the forward path checks NULL explicitly.
  */
  value=GetImageArtifact(image,"fourier:normalize");
  if (LocaleCompare(value,"inverse") == 0)
    {
      double
        gamma;

      /*
        Normalize inverse transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          fourier_pixels[i]*=gamma;
#else
          fourier_pixels[i][0]*=gamma;
          fourier_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Serialize plan creation; the FFTW planner is not thread-safe.  The c2r
    transform may overwrite its input array ('fourier_pixels'), which is
    acceptable since it is not reused afterwards.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_InverseFourierTransform)
#endif
  fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
    fourier_pixels,source_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
  fftw_destroy_plan(fftw_c2r_plan);
  i=0L;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    /*
      The transform domain may be larger than the image; skip rows and
      columns beyond the image bounds (i still advances so indexing into
      source_pixels stays aligned with the transform domain).
    */
    if (y >= (ssize_t) image->rows)
      break;
    q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
      image->columns ? image->columns : fourier_info->width,1UL,exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      if (x < (ssize_t) image->columns)
        switch (fourier_info->channel)
        {
          case RedChannel:
          default:
          {
            SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case GreenChannel:
          {
            SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case BlueChannel:
          {
            SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case OpacityChannel:
          {
            SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case IndexChannel:
          {
            SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
              source_pixels[i]));
            break;
          }
          case GrayChannels:
          {
            SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
        }
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
/*
  InverseFourierTransformChannel() computes one channel of the inverse DFT:
  it merges the magnitude/phase image pair into complex Fourier coefficients
  (InverseFourier) and transforms them back into the spatial domain
  (InverseFourierTransform), writing into fourier_image.

  Fix: removed the dead store of fourier_info.height from
  magnitude_image->rows - it was unconditionally overwritten with
  fourier_info.width below.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *inverse_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info;

  /*
    The transform runs on a square, even-sized grid: pad odd or non-square
    images up to the next admissible extent.
  */
  fourier_info.width=magnitude_image->columns;
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      size_t extent=magnitude_image->columns < magnitude_image->rows ?
        magnitude_image->rows : magnitude_image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  /*
    Hermitian symmetry: a real-to-complex transform stores only
    width x (height/2+1) complex coefficients.
  */
  inverse_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*inverse_pixels));
  if (inverse_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,
    inverse_pixels,exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
      exception);
  inverse_info=RelinquishVirtualMemory(inverse_info);
  return(status);
}
#endif
/*
  InverseFourierTransformImage() returns a new image reconstructed from the
  inverse discrete Fourier transform of the magnitude/phase image pair.
  `modulus` selects how the pair is interpreted (magnitude+phase vs
  real+imaginary components, handled downstream in InverseFourier).
  Returns NULL on failure; without FFTW support a missing-delegate warning
  is raised and NULL is returned.
*/
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
const Image *phase_image,const MagickBooleanType modulus,
ExceptionInfo *exception)
{
Image
*fourier_image;
assert(magnitude_image != (Image *) NULL);
assert(magnitude_image->signature == MagickCoreSignature);
if (magnitude_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
magnitude_image->filename);
/* both halves of the transform are required */
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",magnitude_image->filename);
return((Image *) NULL);
}
#if !defined(MAGICKCORE_FFTW_DELEGATE)
fourier_image=(Image *) NULL;
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
magnitude_image->filename);
#else
{
fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
magnitude_image->rows,MagickTrue,exception);
if (fourier_image != (Image *) NULL)
{
MagickBooleanType
is_gray,
status;
status=MagickTrue;
/* a gray pair needs only one channel transform instead of R, G and B */
is_gray=IsGrayImage(magnitude_image,exception);
if (is_gray != MagickFalse)
is_gray=IsGrayImage(phase_image,exception);
/* each channel is transformed independently, one per OpenMP section */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
/* gray short-circuit: transform a single gray channel, else red */
if (is_gray != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GrayChannels,modulus,fourier_image,exception);
else
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,RedChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
/* opacity only when the image carries an alpha/matte channel */
if (magnitude_image->matte != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
/* the index (black) channel exists only for CMYK images */
if (magnitude_image->colorspace == CMYKColorspace)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
/* any channel failure discards the whole result */
if (status == MagickFalse)
fourier_image=DestroyImage(fourier_image);
}
/* release FFTW's internal planner state accumulated above */
fftw_cleanup();
}
#endif
return(fourier_image);
}
|
network.h | // == mojo ====================================================================
//
// Copyright (c) gnawice@gnawice.com. All rights reserved.
// See LICENSE in root folder
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files(the "Software"),
// to deal in the Software without restriction, including without
// limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to
// whom the Software is furnished to do so, subject to the following
// conditions :
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
// OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
// THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// ============================================================================
// network.h: The main artificial neural network graph for mojo
// ==================================================================== mojo ==
#pragma once
#include <string>
#include <iostream> // cout
#include <fstream>
#include <sstream>
#include <map>
#include <vector>
#include "layer.h"
#include "solver.h"
#include "activation.h"
#include "cost.h"
// hack for VS2010 to handle c++11 for(:)
#if (_MSC_VER == 1600)
#ifndef __for__
#define __for__ for each
#define __in__ in
#endif
#else
#ifndef __for__
#define __for__ for
#define __in__ :
#endif
#endif
#if defined(MOJO_CV2) || defined(MOJO_CV3)
#ifdef MOJO_CV2
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#pragma comment(lib, "opencv_core249")
#pragma comment(lib, "opencv_highgui249")
#pragma comment(lib, "opencv_imgproc249")
#pragma comment(lib, "opencv_contrib249")
#else //#ifdef MOJO_CV3
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#pragma comment(lib, "opencv_world310")
#endif
#endif
#define blocksize 100000
namespace mojo {
#if defined(MOJO_CV2) || defined(MOJO_CV3)
// forward declare these for data augmentation
cv::Mat matrix2cv(const mojo::matrix &m, bool uc8 = false);
mojo::matrix cv2matrix(cv::Mat &m);
mojo::matrix transform(const mojo::matrix in, const int x_center, const int y_center, int out_dim, float theta = 0, float scale = 1.f);
#endif
#ifdef MOJO_PROFILE_LAYERS
#ifdef _WIN32
//* used for profiling layers
// Millisecond-resolution timer built on QueryPerformanceCounter.
// NOTE(review): these are non-inline globals/functions defined in a header;
// enabling MOJO_PROFILE_LAYERS in more than one translation unit would
// violate the one-definition rule - confirm single-TU use.
double PCFreq = 0.0;
__int64 CounterStart = 0;
// Captures the counter frequency (ticks per millisecond) and the start tick.
void StartCounter()
{
LARGE_INTEGER li;
if (!QueryPerformanceFrequency(&li)) return;
PCFreq = double(li.QuadPart) / 1000.0;
QueryPerformanceCounter(&li);
CounterStart = li.QuadPart;
}
// Milliseconds elapsed since the last StartCounter() call.
double GetCounter()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return double(li.QuadPart - CounterStart) / PCFreq;
}
#else
// Non-Windows stubs: profiling output will always report 0 ms.
void StartCounter(){}
double GetCounter(){return 0;}
#endif
#endif
//*/
// Replaces every occurrence of `from` in `str` with `to`, in place.
// Scanning resumes just past each inserted replacement, so a `to` that
// contains `from` (e.g. replacing "x" with "yx") cannot loop forever.
// An empty `from` is a no-op.
void replace_str(std::string& str, const std::string& from, const std::string& to) {
	if (from.empty())
		return;
	for (size_t pos = str.find(from); pos != std::string::npos; pos = str.find(from, pos))
	{
		str.replace(pos, from.length(), to);
		pos += to.length();
	}
}
// Returns the training energy (half the squared euclidean distance between
// `out` and `target`) over `size` elements. If `best_index` is non-NULL it
// also receives the argmax of `out` (first index wins on ties).
float match_labels(const float *out, const float *target, const int size, int *best_index = NULL)
{
	float energy = 0;
	int arg_best = 0;
	for (int idx = 0; idx < size; idx++)
	{
		const float diff = out[idx] - target[idx];
		energy += diff * diff;
		if (out[idx] > out[arg_best]) arg_best = idx;
	}
	if (best_index != NULL) *best_index = arg_best;
	energy *= 0.5;
	return energy;
}
// argmax: index of the largest element in out[0..size-1].
// On ties the earliest index is kept (strict '<' comparison).
int arg_max(const float *out, const int size)
{
	int best = 0;
	for (int i = 1; i < size; i++)
		if (out[best] < out[i])
			best = i;
	return best;
}
//----------------------------------------------------------------------
// network
// - class that holds all the layers and connection information
// - runs forward prediction
class network
{
int _size; // output size
int _thread_count; // determines number of layer sets (copys of layers)
int _internal_thread_count; // used for speeding up convolutions, etc..
static const int MAIN_LAYER_SET = 0; // index of the canonical layer set; others are per-thread copies
// training related stuff
int _batch_size; // determines number of dW sets
float _skip_energy_level; // samples with energy below this are skipped during smart training
bool _smart_train; // enables skip/estimate heuristics during training
std::vector <float> _running_E; // recent per-sample energies (training statistics)
double _running_sum_E; // running sum of energies (training statistics)
cost_function *_cost_function; // loss used for training (owned; freed in destructor)
solver *_solver; // weight-update optimizer (owned; freed in destructor)
// states of a mini-batch slot in batch_open
static const unsigned char BATCH_RESERVED = 1, BATCH_FREE = 0, BATCH_COMPLETE = 2;
static const int BATCH_FILLED_COMPLETE = -2, BATCH_FILLED_IN_PROCESS = -1;
#ifdef MOJO_OMP
// OMP lock guarding the mini-batch bookkeeping when training from
// multiple external threads; no-op stubs are used without MOJO_OMP.
omp_lock_t _lock_batch;
void lock_batch() {omp_set_lock(&_lock_batch);}
void unlock_batch() {omp_unset_lock(&_lock_batch);}
void init_lock() {omp_init_lock(&_lock_batch);}
void destroy_lock() {omp_destroy_lock(&_lock_batch);}
int get_thread_num() {return omp_get_thread_num();}
#else
void lock_batch() {}
void unlock_batch() {}
void init_lock(){}
void destroy_lock() {}
int get_thread_num() {return 0;}
#endif
public:
// training progress stuff
int train_correct;
int train_skipped;
int stuck_counter;
int train_updates;
int train_samples;
int epoch_count;
int max_epochs;
float best_estimated_accuracy;
int best_accuracy_count;
float old_estimated_accuracy;
float estimated_accuracy;
// data augmentation stuff
int use_augmentation; // 0=off, 1=mojo, 2=opencv
int augment_x, augment_y;
int augment_h_flip, augment_v_flip;
mojo::pad_type augment_pad;
float augment_theta;
float augment_scale;
// here we have multiple sets of the layers to allow threading and batch processing
// a separate layer set is needed for each independent thread
std::vector< std::vector<base_layer *>> layer_sets;
std::map<std::string, int> layer_map; // name-to-index of layer for layer management
std::vector<std::pair<std::string, std::string>> layer_graph; // pairs of names of layers that are connected
std::vector<matrix *> W; // these are the weights between/connecting layers
// these sets are needed because we need copies for each item in mini-batch
std::vector< std::vector<matrix>> dW_sets; // only for training, will have _batch_size of these
std::vector< std::vector<matrix>> dbias_sets; // only for training, will have _batch_size of these
std::vector< unsigned char > batch_open; // only for training, will have _batch_size of these
// Constructs an empty network. `opt_name` names the weight-update
// solver/optimizer passed to new_solver() (NULL selects that factory's
// default). One layer set and batch-size-1 bookkeeping are allocated up
// front; call enable_external_threads()/push_back() to build the graph.
network(const char* opt_name=NULL): _thread_count(1), _skip_energy_level(0.f), _batch_size(1)
{
_internal_thread_count=1;
_size=0;
_solver = new_solver(opt_name);
_cost_function = NULL;
//std::vector<base_layer *> layer_set;
//layer_sets.push_back(layer_set);
layer_sets.resize(1);
dW_sets.resize(_batch_size);
dbias_sets.resize(_batch_size);
batch_open.resize(_batch_size);
_running_sum_E = 0.;
// reset training-progress counters
train_correct = 0;
train_samples = 0;
train_skipped = 0;
epoch_count = 0;
max_epochs = 1000;
train_updates = 0;
estimated_accuracy = 0;
old_estimated_accuracy = 0;
stuck_counter = 0;
best_estimated_accuracy=0;
best_accuracy_count=0;
// augmentation defaults: disabled
use_augmentation=0;
augment_x = 0; augment_y = 0; augment_h_flip = 0; augment_v_flip = 0;
augment_pad =mojo::edge;
augment_theta=0; augment_scale=0;
// batch lock (no-op unless MOJO_OMP)
init_lock();
#ifdef USE_AF
af::setDevice(0);
af::info();
#endif
}
// Frees all layers and weights (via clear()), the owned cost function and
// solver, and releases the OMP batch lock.
~network()
{
clear();
if (_cost_function) delete _cost_function;
if(_solver) delete _solver;
destroy_lock();
}
// call clear if you want to load a different configuration/model
// Deletes every layer in every layer set and resets the graph bookkeeping
// (weights, name->index map, connection list) so the network can be
// rebuilt from scratch with push_back()/connect().
void clear()
{
	for(int i=0; i<(int)layer_sets.size(); i++)
	{
		__for__(auto l __in__ layer_sets[i]) delete l;
		// bug fix: the original called layer_sets.clear() here, wiping the
		// OUTER vector mid-iteration - the loop stopped after set 0 and the
		// layers owned by the remaining per-thread sets were leaked.
		layer_sets[i].clear();
	}
	layer_sets.clear();
	__for__(auto w __in__ W) if(w) delete w;
	W.clear();
	layer_map.clear();
	layer_graph.clear();
}
// output size of final layer;
int out_size() {return _size;}
// get input size
// Writes the input layer's width/height/channel counts into *w, *h, *c.
// Returns false if no layers have been configured yet.
bool get_input_size(int *w, int *h, int *c)
{
if(layer_sets[MAIN_LAYER_SET].size()<1) return false;
*w=layer_sets[MAIN_LAYER_SET][0]->node.cols;*h=layer_sets[MAIN_LAYER_SET][0]->node.rows;*c=layer_sets[MAIN_LAYER_SET][0]->node.chans;
return true;
}
// sets up number of layer copies to run over multiple threads
// Grows layer_sets up to _thread_count copies (never shrinks - see ToDo)
// and copies bias values into the copies via sync_layer_sets().
// NOTE(review): resize() creates EMPTY sets; sync_layer_sets() indexes them
// by the main set's layer count, so calling this after layers exist but
// before the copies are populated looks out-of-bounds - confirm callers
// always size threads before constructing layers (as push_back() warns).
void build_layer_sets()
{
int layer_cnt = (int)layer_sets.size();
if (layer_cnt<_thread_count) layer_sets.resize(_thread_count);
// ToDo: add shrink back / else if(layer_cnt>_thread_count)
sync_layer_sets();
}
inline int get_thread_count() {return _thread_count;}
// must call this with max thread count before constructing layers
// value <1 will result in thread count = # cores (including hyperthreaded)
// Without MOJO_OMP, requesting more than one thread aborts via bail().
void enable_external_threads(int threads = -1)
{
#ifdef MOJO_OMP
if (threads < 1) threads = omp_get_num_procs();
_thread_count = threads;
// only widen the OMP pool when internal threading doesn't already need more
if(_internal_thread_count<=_thread_count) omp_set_num_threads(_thread_count);
omp_set_nested(1);
#else
if (threads < 1) _thread_count = 1;
else _thread_count = threads;
if (threads > 1) bail("must define MOJO_OMP to used threading");
#endif
build_layer_sets();
}
// Sets the number of OMP threads used *inside* a layer (convolutions etc.).
// A value <1 selects core count minus one; without MOJO_OMP this is
// always forced to 1.
void enable_internal_threads(int threads = -1)
{
#ifdef MOJO_OMP
if (threads < 1) {threads = omp_get_num_procs(); threads = threads-1;} // one less than core count
if(threads<1) _internal_thread_count=1;
else _internal_thread_count=threads;
omp_set_nested(1);
#else
_internal_thread_count=1;
#endif
}
// when using threads, need to get bias data synched between all layer sets,
// call this after bias update in main layer set to copy the bias to the other sets
// Copies every bias value from the main set (index 0) into each of the
// other per-thread layer-set copies, element by element.
void sync_layer_sets()
{
for(int i=1; i<(int)layer_sets.size();i++)
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
for(int k=0; k<layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
(layer_sets[i])[j]->bias.x[k]=(layer_sets[MAIN_LAYER_SET])[j]->bias.x[k];
}
// used to add some noise to weights
void heat_weights()
{
__for__(auto w __in__ W)
{
if (!w) continue;
matrix noise(w->cols, w->rows, w->chans);
noise.fill_random_normal(1.f/ noise.size());
//noise *= *w;
*w += noise;
}
}
// used to add some noise to weights
void remove_means()
{
__for__(auto w __in__ W)
if(w) w->remove_mean();
}
// used to push a layer back in the ORDERED list of layers
// if connect_all() is used, then the order of the push_back is used to connect the layers
// when forward or backward propogation, this order is used for the serialized order of calculations
// Layer_name must be unique. Returns false if the name already exists.
bool push_back(const char *layer_name, const char *layer_config)
{
	// bug fix: the original tested `layer_map[layer_name]`, which (a)
	// default-inserts a phantom entry for every new name probed and (b)
	// cannot detect a duplicate of the FIRST layer, whose stored index is 0.
	if (layer_map.find(layer_name) != layer_map.end()) return false; // already exists
	base_layer *l = new_layer(layer_name, layer_config);
	// make sure there is a 'set' to add layers to
	if (layer_sets.size() < 1)
	{
		std::vector<base_layer *> layer_set;
		layer_sets.push_back(layer_set);
	}
	// make sure layer_sets are created
	build_layer_sets();
	// record name -> index so connect() can find this layer later
	layer_map[layer_name] = (int)layer_sets[MAIN_LAYER_SET].size();
	layer_sets[MAIN_LAYER_SET].push_back(l);
	// upadate as potential last layer - so it sets the out size
	_size = l->fan_size();
	// add other copies needed for threading
	for (int i = 1; i < (int)layer_sets.size(); i++) layer_sets[i].push_back(new_layer(layer_name, layer_config));
	return true;
}
// connect 2 layers together and initialize weights
// top and bottom concepts are reversed from literature
// my 'top' is the input of a forward() pass and the 'bottom' is the output
// perhaps 'top' traditionally comes from the brain model, but my 'top' comes
// from reading order (information flows top to bottom)
void connect(const char *layer_name_top, const char *layer_name_bottom)
{
	size_t i_top = layer_map[layer_name_top];
	size_t i_bottom = layer_map[layer_name_bottom];
	base_layer *l_top = layer_sets[MAIN_LAYER_SET][i_top];
	base_layer *l_bottom = layer_sets[MAIN_LAYER_SET][i_bottom];
	int w_i = (int)W.size();
	matrix *w = l_bottom->new_connection(*l_top, w_i);
	W.push_back(w);
	layer_graph.push_back(std::make_pair(layer_name_top, layer_name_bottom));
	// need to build connections for other batches/threads; the weight matrix
	// itself is shared, so the extra matrix returned for each copy is discarded
	for (int i = 1; i < (int)layer_sets.size(); i++)
	{
		l_top = layer_sets[i][i_top];
		l_bottom = layer_sets[i][i_bottom];
		delete l_bottom->new_connection(*l_top, w_i);
	}
	// we need to let solver prepare space for stateful information
	if (_solver)
	{
		if (w) _solver->push_back(w->cols, w->rows, w->chans);
		else _solver->push_back(1, 1, 1);
	}
	int fan_in = l_bottom->fan_size();
	int fan_out = l_top->fan_size();
	// ToDo: this may be broke when 2 layers connect to one. need to fix (i.e. resnet)
	// after all connections, run through and do weights with correct fan count
	// initialize weights by the bottom layer's activation:
	// xavier for tanh/sigmoid, he for the relu family, lecun otherwise
	if (w && l_bottom->has_weights())
	{
		if (strcmp(l_bottom->p_act->name, "tanh") == 0)
		{
			// xavier : for tanh
			float weight_base = (float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
			w->fill_random_uniform(weight_base);
		}
		// bug fix: the original tested "sigmoid" twice in an || (copy-paste
		// duplicate); a single comparison is equivalent
		else if (strcmp(l_bottom->p_act->name, "sigmoid") == 0)
		{
			// xavier : for sigmoid
			float weight_base = 4.f*(float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
			w->fill_random_uniform(weight_base);
		}
		else if ((strcmp(l_bottom->p_act->name, "lrelu") == 0) || (strcmp(l_bottom->p_act->name, "relu") == 0)
			|| (strcmp(l_bottom->p_act->name, "vlrelu") == 0) || (strcmp(l_bottom->p_act->name, "elu") == 0))
		{
			// he : for relu
			float weight_base = (float)(std::sqrt(2. / (double)fan_in));
			w->fill_random_normal(weight_base);
		}
		else
		{
			// lecun : orig
			float weight_base = (float)(std::sqrt(1. / (double)fan_in));
			w->fill_random_uniform(weight_base);
		}
	}
	else if (w) w->fill(0);
}
// automatically connect all layers in the order they were provided
// easy way to go, but can't deal with branch/highway/resnet/inception types of architectures
void connect_all()
{
	const int last = (int)layer_sets[MAIN_LAYER_SET].size() - 1;
	for (int idx = 0; idx < last; idx++)
		connect(layer_sets[MAIN_LAYER_SET][idx]->name.c_str(), layer_sets[MAIN_LAYER_SET][idx + 1]->name.c_str());
}
// Returns the index of the named layer in the main layer set, or -1 if
// no layer has that exact name.
int get_layer_index(const char *name)
{
	const int count = (int)layer_sets[MAIN_LAYER_SET].size();
	for (int idx = 0; idx < count; idx++)
	{
		if (layer_sets[MAIN_LAYER_SET][idx]->name.compare(name) == 0)
			return idx;
	}
	return -1;
}
// get the list of layers used (but not connection information)
// Builds a human-readable summary: one " index : name : config" entry per
// layer, then the connection pairs formatted three per line.
std::string get_configuration()
{
std::string str;
std::string space(" ");
std::string symbol(" : ");
// print all layer configs
for (int j = 0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
{
//std::string jstr(dtoa(j));
std::string jstr;
// j==0 is special-cased so the index renders as "0" rather than dtoa(0)
if(j == 0) jstr = "0";
else jstr = dtoa(j);
std::string lname(layer_sets[MAIN_LAYER_SET][j]->name);
std::string lsets(layer_sets[MAIN_LAYER_SET][j]->get_config_string());
//str+= " "+ std::to_string((long long)j) +" : " +layer_sets[MAIN_LAYER_SET][j]->name +" : " + layer_sets[MAIN_LAYER_SET][j]->get_config_string();
str += space + jstr + symbol + lname + symbol + lsets;
}
str += "\n";
// print layer links
if (layer_graph.size() <= 0) return str;
// "top-bottom" pairs, comma separated, three per output line
for (int j = 0; j < (int)layer_graph.size(); j++)
{
if (j % 3 == 0) str += " ";
if((j % 3 == 1)|| (j % 3 == 2)) str += ", ";
str +=layer_graph[j].first + "-" + layer_graph[j].second;
if (j % 3 == 2) str += "\n";
}
return str;
}
// Runs a forward pass on `in` and returns the index of the strongest
// output node (the predicted class). Provide a thread index when calling
// concurrently; the internal per-thread data is not otherwise thread safe.
int predict_class(const float *in, int _thread_number = -1)
{
	const float *activations = forward(in, _thread_number);
	return arg_max(activations, out_size());
}
//----------------------------------------------------------------------------------------------------------
// F O R W A R D
//
// the main forward pass
// if calling over multiple threads, provide the thread index since the interal data is not otherwise thread safe
// train parameter is used to designate the forward pass is used in training (it turns on dropout layers, etc..)
// Returns a live pointer into the final layer's node buffer: do not free or
// modify it, and note it is overwritten by the next forward() call that
// uses the same thread index.
float* forward(const float *in, int _thread_number=-1, int _train=0)
{
// for(int i = W[0]->size()-10; i < W[0]->size(); i++)
// printf("W[i]->x[%d] = %f\n", i, W[0]->x[i]);
// resolve/validate which per-thread layer set to run on
if(_thread_number<0) _thread_number=get_thread_num();
if (_thread_number > _thread_count && _thread_count>0) bail("need to enable threading\n");
if (_thread_number >= (int)layer_sets.size()) bail("need to enable threading\n");
//std::cout << get_thread_num() << ",";
// clear nodes to zero & find input layers
std::vector<base_layer *> inputs;
__for__(auto layer __in__ layer_sets[_thread_number])
{
if (dynamic_cast<input_layer*> (layer) != NULL) inputs.push_back(layer);
layer->set_threading(_internal_thread_count);
layer->node.fill(0.f);
}
// first layer assumed input. copy input to it
// (multiple input layers consume consecutive spans of `in`)
const float *in_ptr = in;
//base_layer * layer = layer_sets[_thread_number][0];
//memcpy(layer->node.x, in, sizeof(float)*layer->node.size());
__for__(auto layer __in__ inputs)
{
memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size());
in_ptr += layer->node.size();
}
//for (int i = 0; i < layer->node.size(); i++)
// layer_sets[_thread_number][0]->node.x[i] = in[i];
// for all layers, in push_back order (the serialized evaluation order)
__for__(auto layer __in__ layer_sets[_thread_number])
{
// add bias and activate these outputs (they should all be summed up from other branches at this point)
//for(int j=0; j<layer->node.chans; j+=10) for (int i=0; i<layer->node.cols*layer->node.rows; i+=10) std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
layer->activate_nodes();
//for(int j=0; j<layer->node.chans; j++) for (int i=0; i<layer->node.cols*layer->node.rows; i+=10) std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
// send output signal downstream (note in this code 'top' is input layer, 'bottom' is output - bucking tradition
__for__ (auto &link __in__ layer->forward_linked_layers)
{
// instead of having a list of paired connections, just use the shape of W to determine connections
// this is harder to read, but requires less look-ups
// the 'link' variable is a std::pair created during the connect() call for the layers
int connection_index = link.first;
base_layer *p_bottom = link.second;
// weight distribution of the signal to layers under it
#ifdef MOJO_PROFILE_LAYERS
StartCounter();
#endif
p_bottom->accumulate_signal(*layer, *W[connection_index], _train);
//if (p_bottom->has_weights())
//for(int j=0; j<layer->node.chans; j++)
//int j=0; for (int i=0; i<layer->node.cols*layer->node.rows; i+=10) std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
#ifdef MOJO_PROFILE_LAYERS
std::cout << p_bottom->name << "\t" << GetCounter() << "ms\n";
#endif
}
}
// return pointer to float * result from last layer
/* std::cout << "out:";
for (int i = 0; i < 10; i++)
{
std::cout << layer_sets[_thread_number][layer_sets[_thread_number].size() - 1]->node.x[i] <<",";
}
std::cout << "\n";
*/
return layer_sets[_thread_number][layer_sets[_thread_number].size()-1]->node.x;
}
// printf-style helper: formats into a fixed stack buffer and forwards the
// text to the ocall file-writer. Output longer than BUFSIZ-1 characters is
// silently truncated by vsnprintf.
void fprint_networkfile(const char *fmt, ...) {
char buf[BUFSIZ] = { '\0' };
va_list ap;
va_start(ap, fmt);
vsnprintf(buf, BUFSIZ, fmt, ap);
va_end(ap);
ocall_fprint_networkfile(buf);
}
//----------------------------------------------------------------------------------------------------------
// W R I T E
//
// write parameters to stream/file
// note that this does not persist intermediate training information that could be needed to 'pickup where you left off'
// Format: "mojo01" magic, layer count, per-layer "name\nconfig" entries,
// graph-pair count, the pairs, a 1/0 flag selecting binary vs text weight
// encoding, then bias and weight data. Binary spans are chunked by
// `blocksize` floats per ocall_write transfer.
// NOTE(review): the `final` parameter is unused in this implementation.
bool write(char *filename, bool binary = false, bool final = false)
{
	int retocall;
	open_outputnetworkfile(&retocall, filename);
	// save layers
	int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
	fprint_networkfile("mojo01\n");
	fprint_networkfile("%d\n", (int)(layer_cnt));
	for(int j=0; j<(int)layer_sets[0].size(); j++)
		// bug fix: `name` is a std::string - passing it through a C variadic
		// "%s" is undefined behavior; it must be converted with c_str()
		fprint_networkfile("%s\n%s", layer_sets[MAIN_LAYER_SET][j]->name.c_str(), layer_sets[MAIN_LAYER_SET][j]->get_config_string().c_str());
	// save graph (pairs of connected layer names)
	fprint_networkfile("%d\n", (int)layer_graph.size());
	for(int j=0; j<(int)layer_graph.size(); j++)
		fprint_networkfile("%s\n%s\n", layer_graph[j].first.c_str(), layer_graph[j].second.c_str());
	if(binary)
	{
		fprint_networkfile("1\n"); // flags that binary data follows
		// save bias info, chunked so no single ocall exceeds blocksize floats
		for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
			if(layer_sets[MAIN_LAYER_SET][j]->use_bias())
			{
				int breakdown = 0;
				while(breakdown + blocksize < layer_sets[MAIN_LAYER_SET][j]->bias.size())
				{
					ocall_write((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x + breakdown*sizeof(float), blocksize*sizeof(float));
					breakdown += blocksize;
				}
				ocall_write((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x + breakdown*sizeof(float), (layer_sets[MAIN_LAYER_SET][j]->bias.size()-breakdown)*sizeof(float));
			}
		// save weights, same chunking
		for (int j = 0; j < (int)W.size(); j++)
		{
			if (W[j])
			{
				int breakdown = 0;
				while(breakdown + blocksize < W[j]->size())
				{
					ocall_write((char*)W[j]->x + breakdown*sizeof(float), blocksize*sizeof(float));
					breakdown += blocksize;
				}
				ocall_write((char*)W[j]->x + breakdown*sizeof(float), (W[j]->size()-breakdown)*sizeof(float));
			}
		}
	}
	else
	{
		fprint_networkfile("0\n"); // flags that text data follows
		// save bias info as whitespace-separated text
		for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
		{
			if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
			{
				for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
					fprint_networkfile("%f ", layer_sets[MAIN_LAYER_SET][j]->bias.x[k]);
				fprint_networkfile("\n");
			}
		}
		// save weights as whitespace-separated text
		for(int j=0; j<(int)W.size(); j++)
		{
			if (W[j])
			{
				for (int i = 0; i < W[j]->size(); i++)
					fprint_networkfile("%f ", W[j]->x[i]);
				fprint_networkfile("\n");
			}
		}
	}
	close_outputnetworkfile();
	return true;
}
// read network from a file/stream
bool endoffile; // set once the reader hits end-of-file (see getcleanline())
// Reads one line, character by character, via ocall_fread_networkfile,
// handling '\n' and "\r\n" line endings and EOF.
std::string getcleanline()
{
std::string s;
// The characters in the stream are read one-by-one using a std::streambuf.
// That is faster than reading them one-by-one using the std::istream.
// Code that uses streambuf this way must be guarded by a sentry object.
// The sentry object performs various tasks,
// such as thread synchronization and updating the stream state.
//std::istream::sentry se(ifs, true);
//std::streambuf* sb = ifs.rdbuf();
for (;;) {
char c;
ocall_fread_networkfile(&c);//sb->sbumpc();
//printf("%d\n", c);
// NOTE(review): `c` is a plain char compared against EOF (an int, -1);
// on platforms where char is unsigned the EOF case can never match -
// confirm the ocall's end-of-stream signalling.
switch (c) {
case '\n':
//printf("s = %s\n", s);
return s;
case '\r':
//if (sb->sgetc() == '\n') sb->sbumpc();
char cc; ocall_fread_networkfile(&cc);
//printf("\\r got, %d\n", c, cc);
if (cc == '\n')
return s;
// NOTE(review): when the char after '\r' is not '\n', control falls
// through to the EOF case below - end-of-file gets flagged, `cc` is
// dropped, and '\r' may be appended via `default`. Looks like a bug
// for lone-'\r' endings; confirm intended input format is "\r\n".
case EOF:
endoffile = true;
//printf("end of file, %d, %s\n", c, s);
// Also handle the case when the last line has no line ending
if (s.empty()) //ifs.setstate(std::ios::eofbit);
return s;
default:
s += (char)c;
}
}
}
//----------------------------------------------------------------------------------------------------------
// R E A D
//
bool read()
{
// if(!ifs.good()) return false;
std::string s;
s = getcleanline();
int layer_count;
int version = 0;
if (s.compare("mojo01")==0)
{
s = getcleanline();
layer_count = atoi(s.c_str());
version = 1;
printf("version = 1, layer_count: %d, line: %s\n", layer_count, s);
}
else if (s.find("mojo:") == 0)
{
//printf("version = -1\n");
version = -1;
int cnt = 1;
while (!endoffile)
{
s = getcleanline();
if (s.empty()) continue;
if(s[0]=='#') continue;
push_back(dtoa(cnt), s.c_str());
printf("layer %d: %s\n", cnt, s.c_str());
cnt++;
}
connect_all();
// copies batch=0 stuff to other batches
sync_layer_sets();
return true;
}
else
{
layer_count = atoi(s.c_str());
printf("layer_count: %d, line: %s\n", layer_count, s);
}
// read layer def
std::string layer_name;
std::string layer_def;
for (auto i=0; i<layer_count; i++)
{
layer_name = getcleanline();
layer_def = getcleanline();
push_back(layer_name.c_str(), layer_def.c_str());
printf("%s: %s\n", layer_name.c_str(), layer_def.c_str());
}
// read graph
int graph_count;
//ifs>>graph_count;
ocall_getint(&graph_count);
end_this_line();
//printf("graph_count: %d\n", graph_count);
//getline(ifs,s); // get endline; just want to end reading the line? the result is not important
if (graph_count <= 0)
{
connect_all();
}
else
{
std::string layer_name1;
std::string layer_name2;
for (auto i=0; i<graph_count; i++)
{
layer_name1= getcleanline();
layer_name2 = getcleanline();
printf("%d: %s", i, layer_name1.c_str());
printf("\t%s", layer_name2.c_str());
printf("\n");
connect(layer_name1.c_str(), layer_name2.c_str());
}
}
int binary;
s=getcleanline(); // get endline
binary = atoi(s.c_str());
printf("binary: %d\n", binary);
// binary version to save space if needed
if(binary==1)
{
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
{
//int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans;
//int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride
// use ocall_read instead, ww31
// ocall_read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float));
int breakdown = 0; //
while(breakdown + blocksize < layer_sets[MAIN_LAYER_SET][j]->bias.size())
{
ocall_read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x + breakdown*sizeof(float), blocksize*sizeof(float));
breakdown += blocksize;
}
ocall_read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x + breakdown*sizeof(float), (layer_sets[MAIN_LAYER_SET][j]->bias.size()-breakdown)*sizeof(float));
// ifs.read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float));
}
for (int j = 0; j < (int)W.size(); j++)
{
if (W[j])
{
printf("loading weight for %d-th layer: %d\n", j, W[j]->size());
// ocall_read((char*)W[j]->x, W[j]->size()*sizeof(float));
int breakdown = 0; //
while(breakdown + blocksize < W[j]->size())
{
ocall_read((char*)W[j]->x + breakdown*sizeof(float), blocksize*sizeof(float));
breakdown += blocksize;
}
ocall_read((char*)W[j]->x + breakdown*sizeof(float), (W[j]->size()-breakdown)*sizeof(float));
// ifs.read((char*)W[j]->x, W[j]->size()*sizeof(float));
}else
printf("loading weight for %d-th layer: 0\n", j);
}
}
else if(binary==0)// text version
{
// read bias
for(int j=0; j<layer_count; j++)
{
if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
{
// int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans;
// int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride;
// for (int i = 0; i < c; i++)
for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
{
//ifs >> layer_sets[MAIN_LAYER_SET][j]->bias.x[k];
ocall_getfloat(&layer_sets[MAIN_LAYER_SET][j]->bias.x[k]);
//std::cout << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << ",";
}
//ifs.ignore();// getline(ifs, s); // get endline
end_this_line();
}
}
// read weights
for (auto j=0; j<(int)W.size(); j++)
{
if (W[j])
{
for (int i = 0; i < W[j]->size(); i++)
ocall_getfloat(&W[j]->x[i]);
//ifs >> W[j]->x[i];
//ifs.ignore(); //getline(ifs, s); // get endline
end_this_line();
}
}
}
// copies batch=0 stuff to other batches
sync_layer_sets();
return true;
}
bool read(char *filename)
{
//std::ifstream fs(filename.c_str(),std::ios::binary);
int retocall;
open_networkfile(&retocall, filename);
if (retocall == 0)
{
endoffile = false;
bool ret = read();
close_networkfile();
return ret;
}
else return false;
}
// bool read(const char *filename) { return read(std::string(filename)); }
#ifndef MOJO_NO_TRAINING // this is surely broke by now and will need to be fixed
// ===========================================================================
// training part
// ===========================================================================
// resets the state of all batches to 'free' state
//void reset_mini_batch() { memset(batch_open.data(), BATCH_FREE, batch_open.size()); }
void reset_mini_batch()
{
for(int i = 0; i < batch_open.size(); i++)
memset(&batch_open[i], BATCH_FREE, sizeof(char));
}
// sets up number of mini batches (storage for sets of weight deltas)
void set_mini_batch_size(int batch_cnt)
{
if (batch_cnt<1) batch_cnt = 1;
_batch_size = batch_cnt;
dW_sets.resize(_batch_size);
dbias_sets.resize(_batch_size);
batch_open.resize(_batch_size);
reset_mini_batch();
}
	// number of mini-batch slots currently configured
	int get_mini_batch_size() { return _batch_size; }
// return index of next free batch
// or returns -2 (BATCH_FILLED_COMPLETE) if no free batches - all complete (need a sync call)
// or returns -1 (BATCH_FILLED_IN_PROCESS) if no free batches - some still in progress (must wait to see if one frees)
int get_next_open_batch()
{
int reserved = 0;
int filled = 0;
for (int i = 0; i<batch_open.size(); i++)
{
if (batch_open[i] == BATCH_FREE) return i;
if (batch_open[i] == BATCH_RESERVED) reserved++;
if (batch_open[i] == BATCH_COMPLETE) filled++;
}
if (reserved>0) return BATCH_FILLED_IN_PROCESS; // all filled but wainting for reserves
if (filled == batch_open.size()) return BATCH_FILLED_COMPLETE; // all filled and complete
bail("threading error"); // should not get here unless threading problem
}
//----------------------------------------------------------------------------------------------------------
// s y n c m i n i b a t c h
//
// apply all weights to first set of dW, then apply to model weights
	// Fold every completed mini-batch's weight/bias deltas into set 0 and
	// apply them to the model weights via the solver, then mark all slots
	// free again. Must not be called while any slot is still BATCH_RESERVED.
	void sync_mini_batch()
	{
		// need to ensure no batches in progress (reserved)
		int next = get_next_open_batch();
		if (next == BATCH_FILLED_IN_PROCESS) bail("thread lock");
		int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
		base_layer *layer;
		// sum contributions of all complete batches into dW_sets[0] / dbias_sets[0]
		for (int k = layer_cnt - 1; k >= 0; k--)
		{
			layer = layer_sets[MAIN_LAYER_SET][k];
			__for__(auto &link __in__ layer->backward_linked_layers)
			{
				int w_index = (int)link.first;
				// if batch free, then make sure it is zero'd out because we will increment dW set [0]
				if (batch_open[0] == BATCH_FREE) dW_sets[0][w_index].fill(0);
				for (int b = 1; b < _batch_size; b++)
				{
					if (batch_open[b] == BATCH_COMPLETE) dW_sets[0][w_index] += dW_sets[b][w_index];
				}
			}
			// convolution layers skip the bias accumulation below
			if (dynamic_cast<convolution_layer*> (layer) != NULL) continue;
			// bias stuff... that needs to be fixed for conv layers perhaps
			if (batch_open[0] == BATCH_FREE) dbias_sets[0][k].fill(0);
			for (int b = 1; b < _batch_size; b++)
			{
				if (batch_open[b] == BATCH_COMPLETE) dbias_sets[0][k] += dbias_sets[b][k];
			}
		}
		// update the model weights from the accumulated deltas
		for (int k = layer_cnt - 1; k >= 0; k--)
		{
			layer = layer_sets[MAIN_LAYER_SET][k];
			__for__(auto &link __in__ layer->backward_linked_layers)
			{
				int w_index = (int)link.first;
				// NOTE(review): dW_sets is indexed by MAIN_LAYER_SET here while the
				// sums above went into set 0 -- this appears to rely on
				// MAIN_LAYER_SET == 0; confirm.
				if (dW_sets[MAIN_LAYER_SET][w_index].size() > 0)
					if (W[w_index]) _solver->increment_w(W[w_index], w_index, dW_sets[MAIN_LAYER_SET][w_index]); // -- 10%
			}
			layer->update_bias(dbias_sets[0][k], _solver->learning_rate);
		}
		// prepare to start mini batch over
		reset_mini_batch();
		train_updates++; // could have no updates .. so this is not exact
		sync_layer_sets();
	}
// reserve_next.. is used to reserve a space in the minibatch for the existing training sample
	// Reserve a mini-batch slot for the current training sample, polling
	// (with a short sleep) while all slots are taken by in-progress samples.
	// If every slot is complete, the batch is synced (weights applied) and a
	// fresh slot is taken. Returns the reserved slot index.
	int reserve_next_batch()
	{
		lock_batch();
		int my_batch_index = -3;
		while (my_batch_index < 0)
		{
			my_batch_index = get_next_open_batch();
			if (my_batch_index >= 0) // valid index
			{
				batch_open[my_batch_index] = BATCH_RESERVED;
				unlock_batch();
				return my_batch_index;
			}
			else if (my_batch_index == BATCH_FILLED_COMPLETE) // all index are complete
			{
				sync_mini_batch(); // resets _batch_index to 0
				my_batch_index = get_next_open_batch();
				batch_open[my_batch_index] = BATCH_RESERVED;
				unlock_batch();
				return my_batch_index;
			}
			// need to wait for ones in progress to finish
			unlock_batch();
			mojo_sleep(1);
			lock_batch();
		}
		return -3; // unreachable: the loop only exits via the returns above
	}
	// learning-rate / solver accessors; all of them require a solver to
	// have been set first (bail otherwise)
	float get_learning_rate() {if(!_solver) bail("set solver"); return _solver->learning_rate;}
	void set_learning_rate(float alpha) {if(!_solver) bail("set solver"); _solver->learning_rate=alpha;}
	void reset_solver() {if(!_solver) bail("set solver"); _solver->reset();}
	// smart training skips backprop for easy, already-correct samples
	bool get_smart_training() {return _smart_train;}
	void set_smart_training(bool _use_train) { _smart_train = _use_train;}
	// energy threshold below which (correct) samples are skipped
	float get_smart_train_level() { return _skip_energy_level; }
	void set_smart_train_level(float _level) { _skip_energy_level = _level; }
	// clamp to at least one epoch
	void set_max_epochs(int max_e) { if (max_e <= 0) max_e = 1; max_epochs = max_e; }
	int get_epoch() { return epoch_count; }
// goal here is to update the weights W.
// use w_new = w_old - alpha dE/dw
// E = sum: 1/2*||y-target||^2
// note y = f(x*w)
// dE = (target-y)*dy/dw = (target-y)*df/dw = (target-y)*df/dx* dx/dw = (target-y) * df * y_prev
// similarly for cross entropy
// ===========================================================================
// training part
// ===========================================================================
void set_random_augmentation(int translate_x, int translate_y,
int flip_h, int flip_v, mojo::pad_type padding = mojo::edge)
{
use_augmentation = 1;
augment_x = translate_x;
augment_y = translate_y;
augment_h_flip = flip_h;
augment_v_flip = flip_v;
augment_pad = padding;
augment_theta = 0;
augment_scale = 0;
}
void set_random_augmentation(int translate_x, int translate_y,
int flip_h, int flip_v, float rotation_deg, float scale, mojo::pad_type padding = mojo::edge)
{
use_augmentation = 2;
augment_x = translate_x;
augment_y = translate_y;
augment_h_flip = flip_h;
augment_v_flip = flip_v;
augment_pad = padding;
augment_theta = rotation_deg;
augment_scale = scale;
}
// call before starting training for current epoch
	// Prepare counters for a new training epoch and select the loss
	// function. With smart training enabled, a run of non-improving epochs
	// halves the learning rate (floored at 1e-6) and advances the stuck
	// counter used by elvis_left_the_building().
	void start_epoch(std::string loss_function="mse")
	{
		_cost_function=new_cost_function(loss_function);
		train_correct = 0;
		train_skipped = 0;
		train_updates = 0;
		train_samples = 0;
		if (epoch_count == 0) reset_solver();
		// accuracy not improving .. slow learning
		if(_smart_train && (best_accuracy_count > 4))
		{
			stuck_counter++;
			set_learning_rate((0.5f)*get_learning_rate());
			if (get_learning_rate() < 0.000001f)
			{
				// heat_weights();
				set_learning_rate(0.000001f);
				stuck_counter++;// end of the line.. so speed up end
			}
			best_accuracy_count = 0;
		}
		old_estimated_accuracy = estimated_accuracy;
		estimated_accuracy = 0;
		//_skip_energy_level = 0.05;
		_running_sum_E = 0;
	}
// time to stop?
bool elvis_left_the_building()
{
// 2 stuck x 4 non best accuracy to quit = 8 times no improvement
if ((epoch_count>max_epochs) || (stuck_counter > 3)) return true;
else return false;
}
// call after putting all training samples through this epoch
bool end_epoch()
{
// run leftovers through mini-batch
sync_mini_batch();
epoch_count++;
// estimate accuracy of validation run
estimated_accuracy = 100.f*train_correct / train_samples;
if (train_correct > best_estimated_accuracy)
{
best_estimated_accuracy = (float)train_correct;
best_accuracy_count = 0;
stuck_counter = 0;
}
else best_accuracy_count++;
return elvis_left_the_building();
}
// if smart training was thinking about exiting, calling reset will make it think everything is OK
	// if smart training was thinking about exiting, calling reset will make
	// it think everything is OK (clears the stall / best-accuracy state)
	void reset_smart_training()
	{
		stuck_counter=0;
		best_accuracy_count = 0;
		best_estimated_accuracy = 0;
	}
//----------------------------------------------------------------------------------------------------------
// u p d a t e _ s m a r t _ t r a i n
//
	// Record one sample's energy E and correctness, under an OpenMP critical
	// section when MOJO_OMP is defined. With smart training enabled it also
	// maintains the skip threshold (_skip_energy_level): every 1000 samples
	// the recorded energies are sorted and the threshold is re-estimated
	// from a mean-scaled percentile of the window.
	void update_smart_train(const float E, bool correct)
	{
#ifdef MOJO_OMP
#pragma omp critical
#endif
		{
			train_samples++;
			if (correct) train_correct++;
			if (_smart_train)
			{
				_running_E.push_back(E);
				_running_sum_E += E;
				const int SMART_TRAIN_SAMPLE_SIZE = 1000;
				int s = (int)_running_E.size();
				if (s >= SMART_TRAIN_SAMPLE_SIZE)
				{
					// mean energy over the window scales the skip fraction
					_running_sum_E /= (double)s;
					std::sort(_running_E.begin(), _running_E.end());
					float top_fraction = (float)_running_sum_E*10.f; //10.
					const float max_fraction = 0.75f;
					const float min_fraction = 0.075f;// 0.03f;
					if (top_fraction > max_fraction) top_fraction = max_fraction;
					if (top_fraction < min_fraction) top_fraction = min_fraction;
					// take the energy at the chosen percentile as the new threshold
					int index = s - 1 - (int)(top_fraction*(s - 1));
					if (_running_E[index] > 0) _skip_energy_level = _running_E[index];
					_running_sum_E = 0;
					_running_E.clear();
				}
			}
			// count samples that fall below the skip threshold
			if (E > 0 && E < _skip_energy_level)
			{
				train_skipped++;
			}
		} // omp critical
	}
// finish back propogation through the hidden layers
	// Backpropagate deltas from the (already computed) output layer down
	// through the hidden layers, accumulate weight/bias deltas into this
	// sample's mini-batch slot, and sync the whole batch if this slot was
	// the last one to complete.
	void backward_hidden(const int my_batch_index, const int thread_number)
	{
		const int layer_cnt = (int)layer_sets[thread_number].size();
		const int last_layer_index = layer_cnt - 1;
		base_layer *layer;// = layer_sets[thread_number][last_layer_index];
		// update hidden layers
		// start at lower layer and push information up to previous layer
		for (int k = last_layer_index; k >= 0; k--)
		{
			layer = layer_sets[thread_number][k];
			// all the signals should be summed up to this layer by now, so we
			// go through and take the grad of activation
			int nodes = layer->node.size();
			// already did last layer, so skip it
			if (k< last_layer_index)
				for (int i = 0; i< nodes; i++)
					layer->delta.x[i] *= layer->df(layer->node.x, i, nodes);
			// now pass that signal upstream
			__for__(auto &link __in__ layer->backward_linked_layers) // --- 50% of time this loop
			{
				base_layer *p_top = link.second;
				// note all the delta[connections[i].second] should have been calculated by time we get here
				layer->distribute_delta(*p_top, *W[link.first]);
			}
		}
		// accumulate weight deltas - shouldn't matter the direction we update
		// these, so we stay in the backwards direction
		int size_W = (int)W.size();
		dW_sets[my_batch_index].resize(size_W);
		dbias_sets[my_batch_index].resize(layer_cnt);
		for (int k = last_layer_index; k >= 0; k--)
		{
			layer = layer_sets[thread_number][k];
			__for__(auto &link __in__ layer->backward_linked_layers)
			{
				base_layer *p_top = link.second;
				int w_index = (int)link.first;
				layer->calculate_dw(*p_top, dW_sets[my_batch_index][w_index]);// --- 20%
				// applying the deltas to W happens later, in sync_mini_batch()
			}
			// convolution layers do not record bias deltas here
			if (dynamic_cast<convolution_layer*> (layer) != NULL) continue;
			dbias_sets[my_batch_index][k] = layer->delta;
		}
		// mark this slot complete; if all batches finished, update weights
		lock_batch();
		batch_open[my_batch_index] = BATCH_COMPLETE;
		int next_index = get_next_open_batch();
		if (next_index == BATCH_FILLED_COMPLETE) // all complete
			sync_mini_batch(); // resets _batch_index to 0
		unlock_batch();
	}
	// Build a single flat input buffer covering all input layers of the net,
	// applying the configured random augmentation (shift/flip, and with
	// OpenCV also rotate/scale) when use_augmentation > 0; otherwise the raw
	// input is copied through unchanged.
	mojo::matrix make_input(float *in, const int _thread_number)
	{
		mojo::matrix augmented_input;// = auto_augmentation();
		std::vector<base_layer *> inputs;
		int in_size = 0;
		// collect all input layers and total their node counts
		__for__(auto layer __in__ layer_sets[_thread_number])
		{
			if (dynamic_cast<input_layer*> (layer) != NULL)
			{
				inputs.push_back(layer);
				in_size += layer->node.size();
			}
		}
		if (use_augmentation > 0)
		{
			augmented_input.resize(in_size, 1, 1);
			// draw all random augmentation parameters from the SGX RNG
			unsigned int randint[6];
			sgx_read_rand((unsigned char *)randint, sizeof(int)*6);
			float s = ((float)(randint[0] % 101) / 50.f - 1.f)*augment_scale;
			float t = ((float)(randint[1] % 101) / 50.f - 1.f)*augment_theta;
			bool flip_h = ((randint[2] % 2)*augment_h_flip) ? true: false;
			bool flip_v = ((randint[3] % 2)*augment_v_flip) ? true: false;
			int shift_x = (randint[4] % (augment_x * 2 + 1)) - augment_x;
			int shift_y = (randint[5] % (augment_y * 2 + 1)) - augment_y;
			int offset = 0;
			__for__(auto layer __in__ inputs)
			{
				// copy this input layer's slice into a matrix for the image ops
				mojo::matrix m(layer->node.cols, layer->node.rows, layer->node.chans, in + offset);
				if (m.rows > 1 && m.cols > 1)
				{
#if defined(MOJO_CV2) || defined(MOJO_CV3)
					if ((augment_theta > 0 || augment_scale > 0))
						m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1 + s);
#endif
					if (flip_v)m = m.flip_cols();
					if (flip_h) m = m.flip_rows();
					mojo::matrix aug = m.shift(shift_x, shift_y, augment_pad);
					memcpy(augmented_input.x + offset, aug.x, sizeof(float)*aug.size());
					offset += aug.size();
				}
				else
				{
					// 1D inputs are copied through without augmentation
					memcpy(augmented_input.x + offset, m.x, sizeof(float)*m.size());
					offset += m.size();
				}
			}
		}
		else
		{
			// no augmentation configured: plain copy of the raw input
			augmented_input.resize(in_size, 1, 1);
			memcpy(augmented_input.x, in, sizeof(float)*in_size);
		}
		return augmented_input;
	}
//----------------------------------------------------------------------------------------------------------
// T R A I N C L A S S
//
// after starting epoch, call this to train against a class label
// label_index must be 0 to out_size()-1
// for thread safety, you must pass in the thread_index if calling from different threads
bool train_class(float *in, int label_index, int _thread_number = -1)
{
if (_solver == NULL) bail("set solver");
if (_thread_number < 0) _thread_number = get_thread_num();
if (_thread_number > _thread_count) bail("call allow_threads()");
const int thread_number = _thread_number;
/*
mojo::matrix augmented_input = make_input(in, thread_number);
/*/
float *input = in;
mojo::matrix augmented_input;
if (use_augmentation > 0)
{
//augment_h_flip = flip_h;
//augment_v_flip = flip_v;
// copy input to matrix type
mojo::matrix m(layer_sets[thread_number][0]->node.cols, layer_sets[thread_number][0]->node.rows, layer_sets[thread_number][0]->node.chans, in);
#if defined(MOJO_CV2) || defined(MOJO_CV3)
if (augment_theta > 0 || augment_scale > 0)
{
float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale;
float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta;
m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1+s);
}
#endif
unsigned int randint[4];
sgx_read_rand((unsigned char *)randint, 4);
if (augment_h_flip)
if ((randint[0] % 2) == 0)
m = m.flip_cols();
if (augment_v_flip)
if ((randint[1] % 2) == 0)
m = m.flip_rows();
augmented_input = m.shift((randint[2] % (augment_x * 2 + 1)) - augment_x, (randint[3] % (augment_y * 2 + 1)) - augment_y, augment_pad);
input = augmented_input.x;
}
//*/
// get next free mini_batch slot
// this is tied to the current state of the model
int my_batch_index = reserve_next_batch();
// out of data or an error if index is negative
if (my_batch_index < 0) return false;
// run through forward to get nodes activated
forward(input, thread_number, 1);
// set all deltas to zero
__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
int layer_cnt = (int)layer_sets[thread_number].size();
// calc delta for last layer to prop back up through network
// d = (target-out)* grad_activiation(out)
const int last_layer_index = layer_cnt - 1;
base_layer *layer = layer_sets[thread_number][last_layer_index];
const int layer_node_size = layer->node.size();
const int layer_delta_size = layer->delta.size();
if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
float E = 0;
int max_j_out = 0;
int max_j_target = label_index;
// was passing this in, but may as well just create it on the fly
// a vector mapping the label index to the desired target output node values
// all -1 except target node 1
std::vector<float> target;
if((std::string("sigmoid").compare(layer->p_act->name) == 0) || (std::string("softmax").compare(layer->p_act->name) == 0)|| (std::string("brokemax").compare(layer->p_act->name) == 0))
target = std::vector<float>(layer_node_size, 0);
else
target = std::vector<float>(layer_node_size, -1);
if(label_index>=0 && label_index<layer_node_size) target[label_index] = 1;
//const float grad_fudge = 1.0f;
// because of numerator/demoninator cancellations which prevent a divide by zero issue,
// we need to handle some things special on output layer
float cost_activation_type = 0;
if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 4;
for (int j = 0; j < layer_node_size; j++)
{
if(cost_activation_type>0)
layer->delta.x[j] = cost_activation_type*(layer->node.x[j]- target[j]);
else
layer->delta.x[j] = _cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size);
// pick best response
if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j;
// for better E maybe just look at 2 highest scores so zeros don't dominate
float f= mse::cost(layer->node.x[j], target[j]);
E += f;//mse::cost(layer->node.x[j], target[j]);
}
E /= (float)layer_node_size;
// check for NAN
if (E != E) bail("network blew up - try lowering learning rate\n");
// critical section in here, blocking update
bool match = false;
if ((max_j_target == max_j_out)) match = true;
update_smart_train(E, match);
if (E>0 && E<_skip_energy_level && _smart_train && match)
{
lock_batch();
batch_open[my_batch_index] = BATCH_FREE;
unlock_batch();
return false; // return without doing training
}
backward_hidden(my_batch_index, thread_number);
return true;
}
//----------------------------------------------------------------------------------------------------------
// T R A I N T A R G E T
//
// after starting epoch, call this to train against a target vector
// for thread safety, you must pass in the thread_index if calling from different threads
// if positive=1, goal is to minimize the distance between in and target
bool train_target(float *in, float *target, int positive=1, int _thread_number = -1)
{
if (_solver == NULL) bail("set solver");
if (_thread_number < 0) _thread_number = get_thread_num();
if (_thread_number > _thread_count) bail("need to enable OMP");
const int thread_number = _thread_number;
mojo::matrix augmented_input = make_input(in, thread_number);
float *input = augmented_input.x;
// get next free mini_batch slot
// this is tied to the current state of the model
int my_batch_index = reserve_next_batch();
// out of data or an error if index is negative
if (my_batch_index < 0) return false;
// run through forward to get nodes activated
float *out=forward(in, thread_number, 1);
// set all deltas to zero
__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
int layer_cnt = (int)layer_sets[thread_number].size();
// calc delta for last layer to prop back up through network
// d = (target-out)* grad_activiation(out)
const int last_layer_index = layer_cnt - 1;
base_layer *layer = layer_sets[thread_number][last_layer_index];
const int layer_node_size = layer->node.size();
if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
float E = 0;
int max_j_out = 0;
//int max_j_target = label_index;
// was passing this in, but may as well just create it on the fly
// a vector mapping the label index to the desired target output node values
// all -1 except target node 1
// std::vector<float> target;
//if ((std::string("sigmoid").compare(layer->p_act->name) == 0) || (std::string("softmax").compare(layer->p_act->name) == 0))
// target = std::vector<float>(layer_node_size, 0);
// else
// target = std::vector<float>(layer_node_size, -1);
// if (label_index >= 0 && label_index<layer_node_size) target[label_index] = 1;
const float grad_fudge = 1.0f;
// because of numerator/demoninator cancellations which prevent a divide by zero issue,
// we need to handle some things special on output layer
float cost_activation_type = 0;
if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("brokemax").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 1;
else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
(std::string("cross_entropy").compare(_cost_function->name) == 0))
cost_activation_type = 4;
for (int j = 0; j < layer_node_size; j++)
{
if (positive) // want to minimize distance
{
if (cost_activation_type > 0)
layer->delta.x[j] = grad_fudge*cost_activation_type*(layer->node.x[j] - target[j]);
else
layer->delta.x[j] = grad_fudge*_cost_function->d_cost(layer->node.x[j], target[j])*layer->df(layer->node.x, j, layer_node_size);
}
else
{
if (cost_activation_type > 0)
layer->delta.x[j] = grad_fudge*cost_activation_type*(1.f-abs(layer->node.x[j] - target[j]));
else
layer->delta.x[j] = grad_fudge*(1.f-abs(_cost_function->d_cost(layer->node.x[j], target[j])))*layer->df(layer->node.x, j, layer_node_size);
}
// pick best response
if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j;
// for better E maybe just look at 2 highest scores so zeros don't dominate
// L2 distance x 2
E += mse::cost(layer->node.x[j], target[j]);
}
E /= (float)layer_node_size;
// check for NAN
if (E != E) bail("network blew up - try lowering learning rate\n");
// critical section in here, blocking update
bool match = false;
// FIxME if ((max_j_target == max_j_out)) match = true;
if (E < 0.01 && positive) match = true;
else if (E > 0.1 && !positive) match = true;
update_smart_train(E, match);
if (E>0 && E<_skip_energy_level && _smart_train && match)
{
lock_batch();
batch_open[my_batch_index] = BATCH_FREE;
unlock_batch();
return false; // return without doing training
}
backward_hidden(my_batch_index, thread_number);
return true;
}
#else
float get_learning_rate() {return 0;}
void set_learning_rate(float alpha) {}
void train(float *in, float *target){}
void reset() {}
float get_smart_train_level() {return 0;}
void set_smart_train_level(float _level) {}
bool get_smart_train() { return false; }
void set_smart_train(bool _use) {}
#endif
};
}
|
sdfgen.c | #include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#ifdef _WIN32
#include <fcntl.h>
#include <io.h>
#endif
#include "df.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb/stb_image_write.h"
// Supported output image formats; FT_NONE marks "not recognized / not deduced".
enum FILETYPE { FT_NONE = -1, FT_PNG, FT_BMP, FT_JPG, FT_TGA };
// Print a printf-style message to stderr followed by a newline, then
// terminate the process with a failure status. Requires <stdarg.h>,
// which the include block previously omitted.
static void error(const char* str, ...) {
    va_list args;
    va_start(args, str);
    vfprintf(stderr, str, args);
    va_end(args); /* fix: va_start without a matching va_end is undefined behavior */
    putc('\n', stderr);
    exit(-1);
}
// Print the command-line usage summary to stdout.
// fix: corrected the "deducable" and "unsinged" typos in the help text.
static void usage() {
    const char* usage =
        "usage: chaq_sdfgen [-f filetype] -i file -o file [-q n] [-s n] [-ahln]\n"
        " -f filetype: manually specify filetype among PNG, BMP, TGA, and JPG\n"
        " (default: deduced by output filename. if not deducible, default is png)\n"
        " -i file: input file\n"
        " specify \"-\" to read input from stdin\n"
        " -o file: output file\n"
        " specify \"-\" to output to stdout\n"
        " -q n: jpg quality (default: 100, only relevant for jpeg output)\n"
        " -s n: spread radius in pixels (default: 64)\n"
        " -a: asymmetric spread (disregard negative distances, becomes unsigned distance transformation)\n"
        " (default: symmetric)\n"
        " -h: show the usage\n"
        " -l: test pixel based on image luminance (default: tests based on alpha channel)\n"
        " -n: invert alpha test; values below threshold will be counted as \"inside\" (default: not inverted)";
    puts(usage);
}
// transforms input image data into boolean buffer
// Threshold one channel of an interleaved 8-bit image into a boolean mask.
// For each pixel, samples img_in[i * stride + offset] and compares it against
// 127: strictly above when test_above is set, strictly below otherwise.
static void transform_img_to_bool(const unsigned char* restrict img_in, bool* restrict bool_out, size_t width,
                                  size_t height, size_t stride, size_t offset, bool test_above) {
    const ptrdiff_t count = (ptrdiff_t)(width * height);
    ptrdiff_t i;
#pragma omp parallel for schedule(static)
    for (i = 0; i < count; ++i) {
        const unsigned char threshold = 127;
        const unsigned char sample = img_in[(size_t)i * stride + offset];
        bool_out[i] = test_above ? (sample > threshold) : (sample < threshold);
    }
}
// transforms boolean buffer to float buffer
// Seed a buffer for the distance transform: cells whose boolean value equals
// true_is_zero start at distance 0, every other cell starts at +infinity.
static void transform_bool_to_float(const bool* restrict bool_in, float* restrict float_out, size_t width,
                                    size_t height, bool true_is_zero) {
    const ptrdiff_t count = (ptrdiff_t)(width * height);
    ptrdiff_t i;
#pragma omp parallel for schedule(static)
    for (i = 0; i < count; ++i) {
        if (bool_in[(size_t)i] == true_is_zero)
            float_out[i] = 0.f;
        else
            float_out[i] = INFINITY;
    }
}
// single-channel char array output of input floats
// Remap signed distance values into 0..255 bytes with a clamped linear map.
// Distances in [-spread, spread] (or [0, spread] when asymmetric) map
// linearly onto [0, 255]; values outside the source range saturate.
static void transform_float_to_byte(const float* restrict float_in, unsigned char* restrict byte_out, size_t width,
                                    size_t height, size_t spread, bool asymmetric) {
    // the remap constants are loop-invariant: compute them once instead of
    // once per pixel as before
    const float s_min = asymmetric ? 0.f : -(float)spread;
    const float s_max = (float)spread;
    const float d_min = 0.f;
    const float d_max = 255.f;
    const float sn = s_max - s_min;
    const float nd = d_max - d_min;
    ptrdiff_t i;
#pragma omp parallel for schedule(static)
    for (i = 0; i < (ptrdiff_t)(width * height); ++i) {
        float v = float_in[i];
        v = v > s_max ? s_max : v;
        v = v < s_min ? s_min : v;
        const float remap = (((v - s_min) * nd) / sn) + d_min;
        byte_out[(size_t)i] = (unsigned char)remap;
    }
}
// In-place subtraction used to consolidate the two distance fields into one
// signed field: float_dst[i] -= adjusted(float_by[i]), where strictly
// positive values of float_by are first biased down by 1.
static void transform_float_sub(float* restrict float_dst, float* restrict float_by, size_t width, size_t height) {
    ptrdiff_t i;
#pragma omp parallel for schedule(static)
    for (i = 0; i < (ptrdiff_t)(width * height); ++i) {
        const float by = float_by[(size_t)i];
        const float adjusted = (by > 0.f) ? (by - 1.f) : by;
        float_dst[(size_t)i] -= adjusted;
    }
}
// Map a filetype/extension string to the FILETYPE enum by comparing its
// first three characters against "png"/"bmp"/"jpg"/"tga".
// fix: the match is now case-insensitive, so "PNG" (as advertised by the
// usage text) and uppercase Windows extensions like ".JPG" are accepted;
// lowercase inputs behave exactly as before. Returns FT_NONE on no match.
static enum FILETYPE read_filetype(const char* string) {
    const char* type_table[] = {"png", "bmp", "jpg", "tga"};
    size_t n_types = sizeof(type_table) / sizeof(const char*);
    for (size_t filetype = 0; filetype < n_types; ++filetype) {
        const char* candidate = type_table[filetype];
        size_t i = 0;
        while (i < 3 && tolower((unsigned char)string[i]) == candidate[i]) ++i;
        if (i == 3) return (enum FILETYPE)filetype;
    }
    return FT_NONE;
}
// stbi_write callback that streams encoded image bytes to stdout; the
// context pointer is unused.
static void write_to_stdout(void* context, void* data, int size) {
    (void)(context);
    // NOTE(review): the return value of fwrite is ignored, so a short write
    // to a closed or broken stdout goes undetected.
    fwrite(data, (size_t)size, 1, stdout);
}
int main(int argc, char** argv) {
omp_set_nested(1);
char* infile = NULL;
char* outfile = NULL;
size_t test_channel = 1;
bool test_above = true;
bool asymmetric = false;
size_t spread = 64;
size_t quality = 100;
enum FILETYPE filetype = FT_NONE;
bool output_to_stdout = false;
bool open_from_stdin = false;
// process arguments
for (int i = 0; i < argc; ++i) {
if (argv[i][0] != '-') continue;
switch (argv[i][1]) {
// i - input file
case 'i': {
if (++i >= argc && infile == NULL) {
usage();
error("No input file specified.");
} else if (i < argc) {
if (strncmp("-", argv[i], 2) == 0) {
open_from_stdin = true;
#ifdef _WIN32
_setmode(_fileno(stdin), _O_BINARY);
#endif
}
infile = argv[i];
}
} break;
// o - output file
case 'o': {
if (++i >= argc && outfile == NULL) {
usage();
error("No output file specified.");
} else if (i < argc) {
if (strncmp("-", argv[i], 2) == 0) {
output_to_stdout = true;
#ifdef _WIN32
_setmode(_fileno(stdout), _O_BINARY);
#endif
}
outfile = argv[i];
}
} break;
// s - spread parameter
case 's': {
if (++i >= argc) {
usage();
error("No number specified with spread.");
}
spread = strtoull(argv[i], NULL, 10);
} break;
// q -- jpeg quality
case 'q': {
if (++i >= argc) {
usage();
error("No number specified with quality.");
}
quality = strtoull(argv[i], NULL, 10);
} break;
// f -- filetype
case 'f': {
if (++i >= argc) {
usage();
error("Filetype not specified with filetype switch.");
}
if ((filetype = read_filetype(argv[i])) == FT_NONE) {
usage();
error("Invalid filetype specified.");
}
} break;
// flags
default: {
size_t j = 1;
while (argv[i][j]) {
switch (argv[i][j]) {
// h - help
case 'h': {
usage();
return 0;
}
// n - invert (test for below threshold instead of above)
case 'n': {
test_above = false;
} break;
// l - test based on luminance
case 'l': {
test_channel = 0;
} break;
// a - asymmetric spread
case 'a': {
asymmetric = true;
} break;
}
++j;
}
} break;
}
}
if (!quality || quality > 100) {
usage();
error("Invalid value given for jpeg quality. Must be between 1-100");
}
if (!spread) {
usage();
error("Invalid value given for spread. Must be a positive integer.");
}
if (infile == NULL) {
usage();
error("No input file specified.");
}
if (outfile == NULL) {
usage();
error("No output file specified.");
}
// 2 channels sufficient to get alpha data of image
int w;
int h;
int n;
int c = 2;
unsigned char* img_original;
if (open_from_stdin) {
img_original = stbi_load_from_file(stdin, &w, &h, &n, c);
} else {
img_original = stbi_load(infile, &w, &h, &n, c);
}
if (img_original == NULL) error("Input file could not be opened.");
// transform image into bool image
bool* img_bool = malloc((size_t)(w * h) * sizeof(bool));
if (img_bool == NULL) error("img_bool malloc failed.");
transform_img_to_bool(img_original, img_bool, (size_t)w, (size_t)h, (size_t)c * sizeof(unsigned char), test_channel,
test_above);
stbi_image_free(img_original);
// compute 2d sdf images
// inside -- pixel distance to INSIDE
// outside -- pixel distance to OUTSIDE
float* img_float_inside = malloc((size_t)(w * h) * sizeof(float));
if (img_float_inside == NULL) error("img_float_inside malloc failed.");
float* img_float_outside = malloc((size_t)(w * h) * sizeof(float));
if (img_float_outside == NULL) error("img_float_outside malloc failed.");
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
{
transform_bool_to_float(img_bool, img_float_inside, (size_t)w, (size_t)h, true);
dist_transform_2d(img_float_inside, (size_t)w, (size_t)h);
}
#pragma omp section
{
transform_bool_to_float(img_bool, img_float_outside, (size_t)w, (size_t)h, false);
dist_transform_2d(img_float_outside, (size_t)w, (size_t)h);
}
}
free(img_bool);
// consolidate in the form of (outside - inside) to img_float_outside
transform_float_sub(img_float_outside, img_float_inside, (size_t)w, (size_t)h);
free(img_float_inside);
// transform distance values to pixel values
unsigned char* img_byte = malloc((size_t)(w * h) * sizeof(unsigned char));
if (img_byte == NULL) error("img_byte malloc failed.");
transform_float_to_byte(img_float_outside, img_byte, (size_t)w, (size_t)h, spread, asymmetric);
free(img_float_outside);
// deduce filetype if not specified
if (!output_to_stdout) {
char* dot = strrchr(outfile, '.');
if (dot != NULL && filetype == FT_NONE) {
filetype = read_filetype(dot + 1);
}
}
// output image
switch (filetype) {
case FT_BMP: {
// bmp
if (output_to_stdout) {
stbi_write_bmp_to_func(write_to_stdout, NULL, w, h, 1, img_byte);
} else {
stbi_write_bmp(outfile, w, h, 1, img_byte);
}
} break;
case FT_JPG: {
// jpg
if (output_to_stdout) {
stbi_write_jpg_to_func(write_to_stdout, NULL, w, h, 1, img_byte, (int)quality);
} else {
stbi_write_jpg(outfile, w, h, 1, img_byte, (int)quality);
}
} break;
case FT_TGA: {
// tga
if (output_to_stdout) {
stbi_write_tga_to_func(write_to_stdout, NULL, w, h, 1, img_byte);
} else {
stbi_write_tga(outfile, w, h, 1, img_byte);
}
} break;
case FT_PNG:
case FT_NONE: {
// png
if (output_to_stdout) {
stbi_write_png_to_func(write_to_stdout, NULL, w, h, 1, img_byte, w * (int)sizeof(unsigned char));
} else {
stbi_write_png(outfile, w, h, 1, img_byte, w * (int)sizeof(unsigned char));
}
} break;
}
free(img_byte);
return 0;
}
|
r_direct_o1.c | /*
*
*/
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <complex.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "optimizer.h"
#include "nr_direct.h"
#include "time_rev.h"
/*
 * Copy a, viewed as [ncomp][dl][dk][dj][di], into at with the j and k
 * axes swapped, i.e. at is laid out as [ncomp][dl][dj][dk][di].
 * Destination elements are written strictly sequentially.
 */
static void transpose01324(double complex * __restrict__ a,
                           double complex * __restrict__ at,
                           int di, int dj, int dk, int dl, int ncomp)
{
        const int dij = di * dj;
        const int dijk = dij * dk;
        const double complex *src;
        int ic, l, j, k, i;

        for (ic = 0; ic < ncomp; ic++) {
        for (l = 0; l < dl; l++) {
                for (j = 0; j < dj; j++) {
                for (k = 0; k < dk; k++) {
                        /* source element (k, j, i) within the current l-block */
                        src = a + k * dij + j * di;
                        for (i = 0; i < di; i++) {
                                *at++ = src[i];
                        }
                } }
                /* advance to the next (ic, l) block of the source */
                a += dijk;
        } }
}
/*
 * For the given shell pair (ish, jsh), loop over ALL (ksh, lsh) pairs
 * (no permutational symmetry, "s1"): generate the integral block
 * (ij|kl) with intor and contract it with every density matrix via the
 * fjk callbacks, accumulating into vjk.
 */
void CVHFdot_rs1(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk,
int n_dm, int ncomp, int ish, int jsh,
CINTOpt *cintopt, CVHFOpt *vhfopt, struct _VHFEnvs *envs)
{
const int nao = envs->nao;
const int nao2 = nao * nao;
const int *ao_loc = envs->ao_loc;
const int *tao = envs->tao;
/* numbers of AO functions carried by shells ish and jsh */
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
int idm;
int ksh, lsh, dk, dl, dijkl;
int shls[4];
double complex *buf;
double complex *pv;
/* per-density screening arrays; r_vkscreen receives these to fill */
double *dms_cond[n_dm];
double dm_atleast;
void (*pf)();
int (*fprescreen)();
int (*r_vkscreen)();
if (vhfopt) {
fprescreen = vhfopt->fprescreen;
r_vkscreen = vhfopt->r_vkscreen;
} else {
/* no optimizer supplied: accept every shell quartet */
fprescreen = CVHFnoscreen;
r_vkscreen = CVHFr_vknoscreen;
}
// to make fjk compatible to C-contiguous dm array, put ksh, lsh inner loop
shls[0] = ish;
shls[1] = jsh;
for (ksh = 0; ksh < envs->nbas; ksh++) {
for (lsh = 0; lsh < envs->nbas; lsh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dl = ao_loc[lsh+1] - ao_loc[lsh];
shls[2] = ksh;
shls[3] = lsh;
if ((*fprescreen)(shls, vhfopt,
envs->atm, envs->bas, envs->env)) {
// append buf.transpose(0,2,1,3) to eris, to reduce the cost of r_direct_dot
/* buffer: raw block (dijkl*ncomp) + room for its 01324-transpose */
dijkl = di * dj * dk * dl;
buf = malloc(sizeof(double complex) * dijkl*ncomp*2);
/* intor returns nonzero when the block is non-negligible */
if ((*intor)(buf, shls, envs->atm, envs->natm,
envs->bas, envs->nbas, envs->env,
cintopt)) {
if ((*r_vkscreen)(shls, vhfopt,
dms_cond, n_dm, &dm_atleast,
envs->atm, envs->bas, envs->env)) {
transpose01324(buf, buf+dijkl*ncomp,
di, dj, dk, dl, ncomp);
}
/* contract with each density matrix; vjk holds n_dm stacked results */
pv = vjk;
for (idm = 0; idm < n_dm; idm++) {
pf = fjk[idm];
(*pf)(buf, dms[idm], pv, nao, ncomp,
shls, ao_loc, tao,
dms_cond[idm], envs->nbas, dm_atleast);
pv += nao2 * ncomp;
}
}
free(buf);
}
} }
}
/*
 * For the given shell pair (ish, jsh), loop over the lower triangle of
 * (ksh, lsh) pairs, i.e. lsh <= ksh with ksh < ksh_count, contracting
 * each (ij|kl) block with the density matrices via fjk.
 */
static void dot_rs2sub(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk,
int n_dm, int ncomp, int ish, int jsh, int ksh_count,
CINTOpt *cintopt, CVHFOpt *vhfopt,
struct _VHFEnvs *envs)
{
const int nao = envs->nao;
const int nao2 = nao * nao;
const int *ao_loc = envs->ao_loc;
const int *tao = envs->tao;
/* numbers of AO functions carried by shells ish and jsh */
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
int idm;
int ksh, lsh, dk, dl, dijkl;
int shls[4];
double complex *buf;
double complex *pv;
/* per-density screening arrays; r_vkscreen receives these to fill */
double *dms_cond[n_dm];
double dm_atleast;
void (*pf)();
int (*fprescreen)();
int (*r_vkscreen)();
if (vhfopt) {
fprescreen = vhfopt->fprescreen;
r_vkscreen = vhfopt->r_vkscreen;
} else {
/* no optimizer supplied: accept every shell quartet */
fprescreen = CVHFnoscreen;
r_vkscreen = CVHFr_vknoscreen;
}
shls[0] = ish;
shls[1] = jsh;
for (ksh = 0; ksh < ksh_count; ksh++) {
for (lsh = 0; lsh <= ksh; lsh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dl = ao_loc[lsh+1] - ao_loc[lsh];
shls[2] = ksh;
shls[3] = lsh;
if ((*fprescreen)(shls, vhfopt,
envs->atm, envs->bas, envs->env)) {
/* buffer: raw block (dijkl*ncomp) + room for its 01324-transpose */
dijkl = di * dj * dk * dl;
buf = malloc(sizeof(double complex) * dijkl*ncomp*2);
if ((*intor)(buf, shls, envs->atm, envs->natm,
envs->bas, envs->nbas, envs->env,
cintopt)) {
if ((*r_vkscreen)(shls, vhfopt,
dms_cond, n_dm, &dm_atleast,
envs->atm, envs->bas, envs->env)) {
transpose01324(buf, buf+dijkl*ncomp,
di, dj, dk, dl, ncomp);
}
/* contract with each density matrix; vjk holds n_dm stacked results */
pv = vjk;
for (idm = 0; idm < n_dm; idm++) {
pf = fjk[idm];
(*pf)(buf, dms[idm], pv, nao, ncomp,
shls, ao_loc, tao,
dms_cond[idm], envs->nbas, dm_atleast);
pv += nao2 * ncomp;
}
}
free(buf);
}
} }
}
/*
 * ij-pair permutation symmetry ("s2ij"): process only ish >= jsh pairs,
 * delegating to the fully unsymmetrized kl loop.
 */
void CVHFdot_rs2ij(int (*intor)(), void (**fjk)(),
                   double complex **dms, double complex *vjk,
                   int n_dm, int ncomp, int ish, int jsh,
                   CINTOpt *cintopt, CVHFOpt *vhfopt, struct _VHFEnvs *envs)
{
        if (ish < jsh) {
                return;  /* upper triangle handled by the (jsh, ish) call */
        }
        CVHFdot_rs1(intor, fjk, dms, vjk, n_dm, ncomp,
                    ish, jsh, cintopt, vhfopt, envs);
}
/*
 * kl-pair permutation symmetry ("s2kl"): every (ish, jsh) pair is
 * processed, but kl runs only over the lower triangle of all shells.
 */
void CVHFdot_rs2kl(int (*intor)(), void (**fjk)(),
                   double complex **dms, double complex *vjk,
                   int n_dm, int ncomp, int ish, int jsh,
                   CINTOpt *cintopt, CVHFOpt *vhfopt, struct _VHFEnvs *envs)
{
        dot_rs2sub(intor, fjk, dms, vjk, n_dm, ncomp,
                   ish, jsh, envs->nbas, cintopt, vhfopt, envs);
}
/*
 * 4-fold permutation symmetry ("s4"): ish >= jsh pairs only, combined
 * with the triangular kl loop of dot_rs2sub.
 */
void CVHFdot_rs4(int (*intor)(), void (**fjk)(),
                 double complex **dms, double complex *vjk,
                 int n_dm, int ncomp, int ish, int jsh,
                 CINTOpt *cintopt, CVHFOpt *vhfopt, struct _VHFEnvs *envs)
{
        if (ish < jsh) {
                return;  /* upper triangle handled by the (jsh, ish) call */
        }
        dot_rs2sub(intor, fjk, dms, vjk, n_dm, ncomp,
                   ish, jsh, envs->nbas, cintopt, vhfopt, envs);
}
/*
 * 8-fold permutation symmetry ("s8"): only ish >= jsh pairs are
 * processed, and kl runs over the lower triangle restricted so that the
 * combined pair index (ksh, lsh) does not pass (ish, jsh) -- see the
 * in-loop comment for why no quartet is visited twice.
 */
void CVHFdot_rs8(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk,
int n_dm, int ncomp, int ish, int jsh,
CINTOpt *cintopt, CVHFOpt *vhfopt, struct _VHFEnvs *envs)
{
if (ish < jsh) {
return;
}
const int nao = envs->nao;
const int nao2 = nao * nao;
const int *ao_loc = envs->ao_loc;
const int *tao = envs->tao;
/* numbers of AO functions carried by shells ish and jsh */
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
int idm;
int ksh, lsh, dk, dl, dijkl;
int shls[4];
double complex *buf;
double complex *pv;
/* per-density screening arrays; r_vkscreen receives these to fill */
double *dms_cond[n_dm];
double dm_atleast;
void (*pf)();
int (*fprescreen)();
int (*r_vkscreen)();
if (vhfopt) {
fprescreen = vhfopt->fprescreen;
r_vkscreen = vhfopt->r_vkscreen;
} else {
/* no optimizer supplied: accept every shell quartet */
fprescreen = CVHFnoscreen;
r_vkscreen = CVHFr_vknoscreen;
}
// to make fjk compatible to C-contiguous dm array, put ksh, lsh inner loop
shls[0] = ish;
shls[1] = jsh;
for (ksh = 0; ksh <= ish; ksh++) {
for (lsh = 0; lsh <= ksh; lsh++) {
/* when ksh==ish, (lsh<jsh) misses some integrals (eg k<i&&l>j).
 * These integrals are calculated in the next (ish,jsh) pair. To show
 * that, we just need to prove that every elements in shell^4 appeared
 * only once in fjk_s8. */
if ((ksh == ish) && (lsh > jsh)) {
break;
}
dk = ao_loc[ksh+1] - ao_loc[ksh];
dl = ao_loc[lsh+1] - ao_loc[lsh];
shls[2] = ksh;
shls[3] = lsh;
if ((*fprescreen)(shls, vhfopt,
envs->atm, envs->bas, envs->env)) {
/* buffer: raw block (dijkl*ncomp) + room for its 01324-transpose */
dijkl = di * dj * dk * dl;
buf = malloc(sizeof(double complex) * dijkl*ncomp*2);
if ((*intor)(buf, shls, envs->atm, envs->natm,
envs->bas, envs->nbas, envs->env,
cintopt)) {
if ((*r_vkscreen)(shls, vhfopt,
dms_cond, n_dm, &dm_atleast,
envs->atm, envs->bas, envs->env)) {
transpose01324(buf, buf+dijkl*ncomp,
di, dj, dk, dl, ncomp);
}
/* contract with each density matrix; vjk holds n_dm stacked results */
pv = vjk;
for (idm = 0; idm < n_dm; idm++) {
pf = fjk[idm];
(*pf)(buf, dms[idm], pv, nao, ncomp,
shls, ao_loc, tao,
dms_cond[idm], envs->nbas, dm_atleast);
pv += nao2 * ncomp;
}
}
free(buf);
}
} }
}
/*
 * Driver: loop over all shell pairs (i, j); for each pair the fdot
 * kernel generates the (ij|kl) integral blocks and calls fjk to
 * accumulate vj/vk into a thread-private buffer, which is then reduced
 * into vjk under a critical section.
 *
 * n_dm is the number of dms for one [array(ij|kl)],
 * ncomp is the number of components that produced by intor
 */
void CVHFr_direct_drv(int (*intor)(), void (*fdot)(), void (**fjk)(),
                      double complex **dms, double complex *vjk,
                      int n_dm, int ncomp, CINTOpt *cintopt, CVHFOpt *vhfopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nao = CINTtot_cgto_spinor(bas, nbas);
        int *ao_loc = malloc(sizeof(int)*(nbas+1));
        int *tao = malloc(sizeof(int)*nao);
        struct _VHFEnvs envs = {natm, nbas, atm, bas, env, nao, ao_loc, tao};
        memset(vjk, 0, sizeof(double complex)*nao*nao*n_dm*ncomp);
        CINTshells_spinor_offset(ao_loc, bas, nbas);
        ao_loc[nbas] = nao;
        CVHFtimerev_map(tao, bas, nbas);
/* NOTE: nao must be listed explicitly in shared(): since OpenMP 4.0,
 * const-qualified variables are no longer predetermined shared, so with
 * default(none) omitting nao is a compile error on modern compilers
 * (e.g. gcc >= 9). */
#pragma omp parallel default(none) \
        shared(intor, fdot, fjk, nao, \
               dms, vjk, n_dm, ncomp, nbas, cintopt, vhfopt, envs)
{
        int i, j, ij;
        /* thread-private accumulator, reduced into vjk below */
        double complex *v_priv = malloc(sizeof(double complex)*nao*nao*n_dm*ncomp);
        memset(v_priv, 0, sizeof(double complex)*nao*nao*n_dm*ncomp);
#pragma omp for nowait schedule(dynamic)
        for (ij = 0; ij < nbas*nbas; ij++) {
                i = ij / nbas;
                j = ij - i * nbas;
                (*fdot)(intor, fjk, dms, v_priv, n_dm, ncomp, i, j,
                        cintopt, vhfopt, &envs);
        }
#pragma omp critical
{
        for (i = 0; i < nao*nao*n_dm*ncomp; i++) {
                vjk[i] += v_priv[i];
        }
}
        free(v_priv);
}
        free(ao_loc);
        free(tao);
}
|
cmapLapParaSimilarity.h | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* */
/* This file is part of the program and software framework */
/* CMAP-LAP --- Configurable Massively Parallel Solver for Lattice Problems */
/* */
/* Copyright Written by Nariaki Tateiwa <n-tateiwa@kyudai.jp>, */
/* Yuji Shinano <shinano@zib.de>, */
/* Copyright (C) 2021 by Zuse Institute Berlin, */
/* licensed under LGPL version 3 or later. */
/* Commercial licenses are available through <licenses@zib.de> */
/* */
/* This code is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Lesser General Public License */
/* as published by the Free Software Foundation; either version 3 */
/* of the License, or (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Lesser General Public License for more details. */
/* */
/* You should have received a copy of the GNU Lesser General Public License */
/* along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/**@file cmapLapParaSimilarity.h
* @brief Functions to calculate similarity of basis set.
* @author Nariaki Tateiwa, Yuji Shinano
*
*
*
*/
/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
#ifndef __CMAP_LAP_PARA_SIMILARITY_H__
#define __CMAP_LAP_PARA_SIMILARITY_H__
#include <algorithm>
#include <cassert>
#include <chrono>
#include <deque>
#include <iostream>
#include <memory>
#include <random>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <eigen3/Eigen/Core>
#include <eigen3/Eigen/SVD>
namespace ParaCMapLAP
{
namespace BasisSimilarity
{
/// indexes d at which the grassmann metrics are evaluated
/// NOTE(review): these namespace-scope definitions live in a header without
/// `inline`; including this header from more than one translation unit would
/// violate the ODR. Presumably the header is included from a single TU --
/// confirm, or mark them `inline` (C++17).
std::vector<int> grassmannIndexes;
/// score matrices filled by outputSimilarityOfBasis():
/// row k = sampled basis pair, column l = grassmannIndexes[l]
Eigen::MatrixXd scores_geodesic_metric;
Eigen::MatrixXd scores_chordal_metric;
Eigen::MatrixXd scores_fubini_study_metric;
Eigen::MatrixXd scores_chordal_2norm_metric;
Eigen::MatrixXd scores_chordal_fnorm_metric;
Eigen::MatrixXd scores_projection_2norm_metric;
Eigen::MatrixXd scores_max_metric;
Eigen::MatrixXd scores_mean_metric;
/// per-pair duplicate counts (one entry per sampled pair)
Eigen::VectorXd scores_num_upper_duplicate;
Eigen::VectorXd scores_num_all_duplicate;
///
/// @brief setter of grassmannIndexes
/// @param[in] inGrassmannIndexes indexes d at which basis similarity is evaluated
/// @note sink parameter: taken by value and moved into the namespace-scope
///       global, which avoids a second copy for rvalue arguments
///
void setGrassmannIndexes(
      std::vector<int> inGrassmannIndexes
      )
{
   grassmannIndexes = std::move(inGrassmannIndexes);
}
///
/// @brief build the CSV header row for the basis-similarity log
/// @param[in] n dimension (not used in the header itself)
/// @return std::string header row matching outputSimilarityOfBasis()
///
std::string outputSimilarityOfBasisHeader(
      int n
      )
{
   assert( grassmannIndexes.size() > 0 );

   // column base names: the two duplicate counts, then one column per
   // (grassmann metric, index) combination
   std::vector<std::string> columns{"num_upper_duplicate", "num_all_duplicate"};
   const char *metricNames[] = {
         "grassmann_geodesic",
         "grassmann_chordal",
         "grassmann_fubini",
         "grassmann_chordal_2norm",
         "grassmann_chordal_fnorm",
         "grassmann_projection_2norm",
         "grassmann_max",
         "grassmann_mean"
   };
   for( const char *name : metricNames )
   {
      for( int idx : grassmannIndexes )
      {
         std::ostringstream column;
         column << name << "_" << idx;
         columns.push_back(column.str());
      }
   }

   // every column reports min / max / average
   std::ostringstream header;
   header << "SimilarityStatus"
          << ",time"
          << ",numPairs";
   for( const auto &column : columns )
   {
      header << "," << column << "_min"
             << "," << column << "_max"
             << "," << column << "_average";
   }
   return header.str();
}
///
/// @brief draw `size` elements from `list` uniformly at random (fixed seed)
/// @tparam T random-access container type of the population
/// @param[in] list population
/// @param[out] sampled_list receives the sampled elements
/// @param[in] size number of elements to draw
///
template<typename T> void sample(
      T &list,
      T &sampled_list,
      int size
      )
{
   assert( static_cast<int>(list.size()) >= size );

   // shuffle index positions instead of the (possibly heavy) elements
   std::vector<int> positions(list.size(), 0);
   for ( size_t pos = 0; pos < list.size(); ++pos )
      positions[pos] = pos;

   std::random_device seed_gen;
   // std::mt19937 engine {seed_gen()};
   std::mt19937 engine {0};   // fixed seed: sampling is reproducible
   std::shuffle(positions.begin(), positions.end(), engine);

   sampled_list.resize(size);
   for ( int out = 0; out < size; ++out )
      sampled_list[out] = list[positions[out]];
}
///
/// @brief sign function
/// @param[in] x
/// @return 1 when x is non-negative, otherwise -1
///
int sign(
      int x
      )
{
   return ( x >= 0 ) ? 1 : -1;
}
///
/// @brief max(k; A(i) == B(i) || A(i) == -B(i) for all i <= k ), i.e. the
///        length of the common prefix of the two bases, comparing rows
///        up to an overall sign
/// @param[in] basisA basis
/// @param[in] basisB basis
/// @return number of matches from the top of the basis vector
///
double num_upper_duplicate(
      LatticeBasis<int>& basisA,
      LatticeBasis<int>& basisB
      )
{
   double matched = 0.0;
   for( int row = 0; row < basisA.rows(); row++ )
   {
      // normalize each row's sign by its leading coefficient before comparing
      bool rowsEqual = ( sign(basisA.coeff(row, 0))*basisA.row(row)
                         == sign(basisB.coeff(row, 0))*basisB.row(row) );
      if( !rowsEqual )
         break;
      matched += 1;
   }
   return matched;
}
///
/// @brief count( ( A(i) == B(i) || A(i) == -B(i) ) for all i )
/// @param[in] basisA basis
/// @param[in] basisB basis
/// @return number of basis vector overlaps (rows equal up to sign)
///
double num_all_duplicate(
      LatticeBasis<int>& basisA,
      LatticeBasis<int>& basisB
      )
{
   double matched = 0.0;
   for( int row = 0; row < basisA.rows(); row++ )
   {
      // normalize each row's sign by its leading coefficient before comparing
      bool rowsEqual = ( sign(basisA.coeff(row, 0))*basisA.row(row)
                         == sign(basisB.coeff(row, 0))*basisB.row(row) );
      if( rowsEqual )
         matched += 1;
   }
   return matched;
}
/// @brief set Gram-Schmidt (orthonormal QR) matrix of a basis
/// @param[in] basis lattice basis (rows are the basis vectors)
/// @param[out] GSO orthonormal matrix whose rows span the basis rows
///
void setGramSchmidt(
      LatticeBasis<int>& basis,
      Eigen::MatrixXd& GSO
      )
{
   // Rows hold the vectors, so QR-factor the transpose and transpose Q back.
   // cast<double>() already produces a fresh double-valued expression, so the
   // previous explicit deep copy `LatticeBasis<int>(basis)` was redundant.
   GSO = basis.cast<double>()
              .transpose()
              .householderQr()
              .householderQ()
              .transpose();
}
///
/// @brief Grassmann Geodesic Metric
/// @param[in] cc canonical correlations (cosines of the principal angles)
/// @details sqrt(sum(theta_i^2 for 0 <= i < m))
///          = sqrt(sum(acos(cc(i))^2 for 0 <= i < m))
///
double grassmann_geodesic_metric(
      Eigen::VectorXd &cc
      )
{
   double sumSquares = 0.0;
   for( int i = 0; i < cc.size(); ++i )
   {
      const double angle = std::acos(cc(i));
      sumSquares += angle * angle;
   }
   return std::sqrt(sumSquares);
}
///
/// @brief Grassmann Chordal (projected) Metric
/// @param[in] cc canonical correlations
/// @details 2^(-1/2) frobenius_norm(GSOA*GSOA^T - GSOB*GSOB^T)
///          = sqrt(m - sum(cos^2(theta_i) for 0 <= i < m))
///          = sqrt(m - sum(cc(i)^2 for 0 <= i < m))
///
double grassmann_chordal_metric(
      Eigen::VectorXd &cc
      )
{
   const int m = cc.size();
   double residual = static_cast<double>(m);
   for( int i = 0; i < m; ++i )
      residual -= cc(i) * cc(i);
   return std::sqrt(residual);
}
///
/// @brief Grassmann Fubini-Study Metric
/// @param[in] cc canonical correlations
/// @details acos(prod(cos(theta_i) for 0 <= i < m))
///          = acos(prod(cc(i) for 0 <= i < m))
///
double grassmann_fubini_study_metric(
      Eigen::VectorXd &cc
      )
{
   const double product = cc.prod();
   return std::acos( product );
}
///
/// @brief Grassmann Chordal 2-norm Metric
/// @param[in] cc canonical correlations
/// @details 2norm( U*GSOA^T - V*GSOB )
///          = infinity_norm(2*sin(theta_i/2) for 0 <= i < m)
///          = 2 * sqrt( max((1-cc(i))/2 for 0 <= i < m) )
///          (valid because 0 < theta_i < pi)
///
double grassmann_chordal_2norm_metric(
      Eigen::VectorXd &cc
      )
{
   // track the largest half-angle chord term
   double worst = (1.0 - cc(0)) / 2.0;
   for( int i = 1; i < cc.size(); ++i )
   {
      const double candidate = (1.0 - cc(i)) / 2.0;
      if( candidate > worst )
         worst = candidate;
   }
   return 2.0 * std::sqrt(worst);
}
///
/// @brief Grassmann Chordal frobenius-norm Metric
/// @param[in] cc canonical correlations
/// @details frobenius_norm( U*GSOA^T - V*GSOB )
///          = 2_norm(2*sin(theta_i/2) for 0 <= i < m)
///          = 2 * sqrt(sum((1-cc(i))/2 for 0 <= i < m))
///
double grassmann_chordal_fnorm_metric(
      Eigen::VectorXd &cc
      )
{
   double total = 0.0;
   for( int i = 0; i < cc.size(); ++i )
      total += (1.0 - cc(i)) / 2.0;
   return 2.0 * std::sqrt(total);
}
///
/// @brief Grassmann Projection 2-norm Metric
/// @param[in] cc canonicalCorrelations
/// @details 2norm( GSOA*GSOA^T - GSOB*GSOB^T )
///          infinity_norm( sin(theta_i) ) for 0 <= i < m) )
///          = sqrt( max( sin^2(theta_i) for 0 <= i < m) ) (because 0 <= theta_i <= pi)
///          = sqrt( max( 1 - cos^2(theta_i) for 0 <= i < m) )
///          = sqrt( max( 1 - cc(i)^2 for 0 <= i < m) )
///          = sqrt( 1 - cc(0)^2 )
/// NOTE(review): Eigen::JacobiSVD returns singular values in DECREASING
/// order, so cc(0) is the LARGEST correlation and 1 - cc(0)^2 is the
/// SMALLEST of the 1 - cc(i)^2 terms -- the last step of the derivation
/// above ("max") does not match. The commented-out loop below would compute
/// the true max. Presumably the current behavior is intentional (compare
/// grassmann_max_metric, which uses cc(m-1)); confirm before changing.
///
double grassmann_projection_2norm_metric(
      Eigen::VectorXd &cc
      )
{
// int m = cc.size();
double d = 1.0 - cc(0) * cc(0);
if( d < 1.0e-5 ) return 0; // snap numerically-identical subspaces to 0
// for( int i = 1; i < m; ++i )
// {
// d = std::max(d, 1.0 - cc(i) * cc(i));
// }
return std::sqrt(d);
}
///
/// @brief Grassmann max Metric
/// @param[in] cc canonicalCorrelations
/// @details min( sin(theta_i) ) for 0 <= i < m) )
///          = sqrt( min( sin^2(theta_i) for 0 <= i < m) ) (because 0 <= theta_i <= pi)
///          = sqrt( min( 1 - cos^2(theta_i) for 0 <= i < m) )
///          = sqrt( min( 1 - cc(i)^2 for 0 <= i < m) )
///          = sqrt( 1 - cc(m-1)^2 )
/// NOTE(review): with Eigen::JacobiSVD singular values sorted in DECREASING
/// order, cc(m-1) is the SMALLEST correlation, so 1 - cc(m-1)^2 is the
/// LARGEST sin^2(theta_i) -- consistent with the function's name ("max")
/// but not with the "min" written in the derivation above; confirm which
/// is intended.
///
double grassmann_max_metric(
      Eigen::VectorXd &cc
      )
{
int m = cc.size();
double d = 1.0 - cc(m-1) * cc(m-1);
if( d < 1.0e-5 ) return 0; // snap numerically-identical subspaces to 0
return std::sqrt(d);
}
///
/// @brief Grassmann Mean Metric
/// @param[in] cc canonicalCorrelations
/// @details mean( sin(theta_i)^2 for 0 <= i < m )
///          = mean( 1 - cos(theta_i)^2 for 0 <= i < m )
///
double grassmann_mean_metric(
      Eigen::VectorXd &cc
      )
{
   const int m = cc.size();
   double total = 0.0;
   for( int i = 0; i < m; i++ )
      total += 1.0 - cc(i)*cc(i);
   return total / static_cast<double>(m);
}
///
/// @brief compute pairwise similarity statistics over sampled basis pairs and
///        format one CSV row (column order matches outputSimilarityOfBasisHeader)
/// @param[in] basisDeque container of basis
/// @param[in] time timestamp reported in the log row
/// @param[in] n dimension of basis
/// @param[in] nSamples number of samples for calculation of basis similarity (-1: all pairs)
/// @param[in] numThreads number of OpenMP threads for the metric loop
/// @param[in] verbose ( 0: not, 1: light, 2: medium, 3: heavy )
/// @return std::string CSV data row
/// @note fills the namespace-scope scores_* matrices as a side effect, so
///       concurrent calls from several threads are not safe
///
std::string outputSimilarityOfBasis(
std::deque<std::shared_ptr<LatticeBasis<int>>> &basisDeque,
double time,
int n,
int nSamples=-1,
int numThreads=1,
int verbose=0
)
{
// wall-clock timer for the closing progress message
auto start = std::chrono::system_clock::now();
auto elapsed = [&start](
)
{
return std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now()-start
).count() / 1000.0;
};
// enumerate all unordered pairs (i, j) with i < j
int numBasis = basisDeque.size();
int numPairs = numBasis * (numBasis-1) / 2;
int k = -1;
// tolerance for treating canonical correlations as exactly 1 (identical subspaces)
double tol = 1e-6;
std::vector<std::pair<int, int>> combinations;
combinations.resize(numPairs);
k = -1;
for ( int i = 0; i < numBasis; ++i )
{
for ( int j = i+1; j < numBasis; ++j )
{
combinations[++k] = std::make_pair(i, j);
}
}
// sampling
if( nSamples == -1 ){ nSamples = combinations.size(); }
nSamples = std::min(nSamples, static_cast<int>(combinations.size()));
decltype(combinations) sampledConbinations;
sample(
combinations,
sampledConbinations,
nSamples
);
// calculate GSO matrix (only for bases that appear in a sampled pair)
std::vector<Eigen::MatrixXd> GSOList;
GSOList.resize(basisDeque.size());
std::set<int> calcGSOindexes;
for( auto pair : sampledConbinations )
{
calcGSOindexes.insert(pair.first);
calcGSOindexes.insert(pair.second);
}
for( auto _k : calcGSOindexes )
{
Eigen::MatrixXd GSO;
setGramSchmidt(*basisDeque.at(_k), GSO);
GSOList[_k] = GSO;
}
// calculate grassmann metric
// (row k = sampled pair, column l = grassmannIndexes[l])
scores_geodesic_metric .resize(nSamples, grassmannIndexes.size());
scores_chordal_metric .resize(nSamples, grassmannIndexes.size());
scores_fubini_study_metric .resize(nSamples, grassmannIndexes.size());
scores_chordal_2norm_metric .resize(nSamples, grassmannIndexes.size());
scores_chordal_fnorm_metric .resize(nSamples, grassmannIndexes.size());
scores_projection_2norm_metric.resize(nSamples, grassmannIndexes.size());
scores_max_metric .resize(nSamples, grassmannIndexes.size());
scores_mean_metric .resize(nSamples, grassmannIndexes.size());
scores_num_upper_duplicate .resize(nSamples);
scores_num_all_duplicate .resize(nSamples);
// each iteration writes only row k of the shared score matrices, so the
// parallel loop is race-free
#pragma omp parallel for num_threads(numThreads) schedule(static)
for( k = 0; k < nSamples; ++k )
{
int i, j;
std::tie(i, j) = sampledConbinations[k];
// duplicate
scores_num_upper_duplicate(k) = num_upper_duplicate(
*(basisDeque.at(i)), *(basisDeque.at(j))
);
scores_num_all_duplicate(k) = num_all_duplicate(
*(basisDeque.at(i)), *(basisDeque.at(j))
);
// grassmann
// 1. generate sub GSO
// 2. get canonical angles
// 3. calculate metric
for( int l = grassmannIndexes.size()-1; l > -1; --l )
{
int d = grassmannIndexes[l];
// entries not overwritten below stay 1 (i.e. principal angle 0)
Eigen::VectorXd canonicalCorrelations = Eigen::VectorXd::Ones(n-d);
if( d >= n / 2.0 )
{
// GSO = [b*0; b*1; ...; b*n-1] -> [b*d; ...; b*(n-1)]
Eigen::MatrixXd subGSOA{GSOList.at(i).block(d,0,n-d,n)};
Eigen::MatrixXd subGSOB{GSOList.at(j).block(d,0,n-d,n)};
// calculate canonical angles
Eigen::JacobiSVD<Eigen::MatrixXd> SVD{subGSOA * subGSOB.transpose()};
canonicalCorrelations.tail(n-d) = SVD.singularValues();
}
else if( d > 0 )
{
// GSO = [b*0; b*1; ...; b*n-1] -> [b*0; ...; b*d-1]
Eigen::MatrixXd subGSOA{GSOList.at(i).block(0,0,d,n)};
Eigen::MatrixXd subGSOB{GSOList.at(j).block(0,0,d,n)};
// calculate canonical angles
Eigen::JacobiSVD<Eigen::MatrixXd> SVD{subGSOA * subGSOB.transpose()};
canonicalCorrelations.tail(d) = SVD.singularValues();
}
// clamp into [-1, 1] so acos() inside the metrics stays well-defined
for( int ii = 0; ii < n-d; ++ii )
{
canonicalCorrelations(ii) = std::min(std::max(canonicalCorrelations(ii), -1.0), 1.0);
}
if( verbose > 0 )
{
std::cout << "k: " << k << " pair(" << i << ", " << j << ") d " << d << std::endl;
std::cout << "canonicalCorrelations " << canonicalCorrelations.transpose() << std::endl;
}
if( (canonicalCorrelations.array() >= 1.0-tol).all() && (canonicalCorrelations.array() <= 1.0+tol).all() )
{
// GSOA == GSOB
scores_geodesic_metric(k, l) = 0.0;
scores_chordal_metric(k, l) = 0.0;
scores_fubini_study_metric(k, l) = 0.0;
scores_chordal_2norm_metric(k, l) = 0.0;
scores_chordal_fnorm_metric(k, l) = 0.0;
scores_projection_2norm_metric(k, l) = 0.0;
scores_max_metric(k, l) = 0.0;
scores_mean_metric(k, l) = 0.0;
}
else
{
// calculate metric
scores_geodesic_metric(k, l)
= grassmann_geodesic_metric(canonicalCorrelations);
scores_chordal_metric(k, l)
= grassmann_chordal_metric(canonicalCorrelations);
scores_fubini_study_metric(k, l)
= grassmann_fubini_study_metric(canonicalCorrelations);
scores_chordal_2norm_metric(k, l)
= grassmann_chordal_2norm_metric(canonicalCorrelations);
scores_chordal_fnorm_metric(k, l)
= grassmann_chordal_fnorm_metric(canonicalCorrelations);
scores_projection_2norm_metric(k, l)
= grassmann_projection_2norm_metric(canonicalCorrelations);
scores_max_metric(k, l)
= grassmann_max_metric(canonicalCorrelations);
scores_mean_metric(k, l)
= grassmann_mean_metric(canonicalCorrelations);
}
}
}
// assemble the CSV row: min, max, average for every column, in the same
// order as outputSimilarityOfBasisHeader()
std::ostringstream s;
s << "SimilarityStatus"
<< "," << time
<< "," << scores_num_upper_duplicate.size();
// upper_duplicate
s << "," << scores_num_upper_duplicate.minCoeff();
s << "," << scores_num_upper_duplicate.maxCoeff();
s << "," << scores_num_upper_duplicate.mean();
// all_duplicate
s << "," << scores_num_all_duplicate.minCoeff();
s << "," << scores_num_all_duplicate.maxCoeff();
s << "," << scores_num_all_duplicate.mean();
for( size_t i = 0; i < grassmannIndexes.size(); ++i )
{
// grassmann_geodesic_metric
s << "," << scores_geodesic_metric.col(i).minCoeff();
s << "," << scores_geodesic_metric.col(i).maxCoeff();
s << "," << scores_geodesic_metric.col(i).mean();
}
for( size_t i = 0; i < grassmannIndexes.size(); ++i )
{
// grassmann_chordal_metric
s << "," << scores_chordal_metric.col(i).minCoeff();
s << "," << scores_chordal_metric.col(i).maxCoeff();
s << "," << scores_chordal_metric.col(i).mean();
}
for( size_t i = 0; i < grassmannIndexes.size(); ++i )
{
// grassmann_fubini_study_metric
s << "," << scores_fubini_study_metric.col(i).minCoeff();
s << "," << scores_fubini_study_metric.col(i).maxCoeff();
s << "," << scores_fubini_study_metric.col(i).mean();
}
for( size_t i = 0; i < grassmannIndexes.size(); ++i )
{
// grassmann_chordal_2norm
s << "," << scores_chordal_2norm_metric.col(i).minCoeff();
s << "," << scores_chordal_2norm_metric.col(i).maxCoeff();
s << "," << scores_chordal_2norm_metric.col(i).mean();
}
for( size_t i = 0; i < grassmannIndexes.size(); ++i )
{
// grassmann_chordal_fnorm
s << "," << scores_chordal_fnorm_metric.col(i).minCoeff();
s << "," << scores_chordal_fnorm_metric.col(i).maxCoeff();
s << "," << scores_chordal_fnorm_metric.col(i).mean();
}
for( size_t i = 0; i < grassmannIndexes.size(); ++i )
{
// grassmann_projection_2norm
s << "," << scores_projection_2norm_metric.col(i).minCoeff();
s << "," << scores_projection_2norm_metric.col(i).maxCoeff();
s << "," << scores_projection_2norm_metric.col(i).mean();
}
for( size_t i = 0; i < grassmannIndexes.size(); ++i )
{
// grassmann_max_norm
s << "," << scores_max_metric.col(i).minCoeff();
s << "," << scores_max_metric.col(i).maxCoeff();
s << "," << scores_max_metric.col(i).mean();
}
for( size_t i = 0; i < grassmannIndexes.size(); ++i )
{
// grassmann_mean_norm
s << "," << scores_mean_metric.col(i).minCoeff();
s << "," << scores_mean_metric.col(i).maxCoeff();
s << "," << scores_mean_metric.col(i).mean();
}
std::cout << "\r simlarity total " << elapsed() << " sec" << std::endl;
return s.str();
}
} // namespace BasisSimilarity
} // namespace ParaCMapLAP
#endif // __CMAP_LAP_PARA_SIMILARITY_H__
|
Diffus4th_order_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Diffus4th_order_core.h"
#include "utils.h"
#define EPS 1.0e-7
/* C-OMP implementation of fourth-order diffusion scheme [1] for piecewise-smooth recovery (2D/3D case)
* The minimisation is performed using explicit scheme.
*
* Input Parameters:
* 1. Noisy image/volume
* 2. lambda - regularization parameter
* 3. Edge-preserving parameter (sigma)
* 4. Number of iterations, for explicit scheme >= 150 is recommended
* 5. tau - time-marching step for the explicit scheme
*
* Output:
* [1] Regularized image/volume
*
* This function is based on the paper by
* [1] Hajiaboli, M.R., 2011. An anisotropic fourth-order diffusion filter for image noise removal. International Journal of Computer Vision, 92(2), pp.177-191.
*/
/*
 * Driver for the fourth-order anisotropic diffusion filter.
 * Input/Output: image buffers of dimX*dimY*dimZ floats (dimZ == 1 -> 2D).
 * lambdaPar: regularization weight; sigmaPar: edge-preserving parameter;
 * iterationsNumb: explicit iterations; tau: time-marching step.
 * Returns the first pixel of Output (kept for API compatibility).
 */
float Diffus4th_CPU_main(float *Input, float *Output, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, int dimX, int dimY, int dimZ)
{
    int i;
    size_t DimTotal;
    float sigmaPar2;
    float *W_Lapl=NULL;
    sigmaPar2 = sigmaPar*sigmaPar;
    /* size_t arithmetic: the previous int product dimX*dimY*dimZ can
     * overflow for large 3D volumes */
    DimTotal = (size_t)dimX*(size_t)dimY*(size_t)dimZ;
    W_Lapl = calloc(DimTotal, sizeof(float));
    /* copy into output */
    copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));
    if (W_Lapl == NULL) {
        /* allocation failed: leave the unfiltered copy in Output */
        return *Output;
    }
    if (dimZ == 1) {
        /* running 2D diffusion iterations */
        for(i=0; i < iterationsNumb; i++) {
            /* Calculating weighted Laplacian */
            Weighted_Laplc2D(W_Lapl, Output, sigmaPar2, dimX, dimY);
            /* Perform iteration step */
            Diffusion_update_step2D(Output, Input, W_Lapl, lambdaPar, sigmaPar2, tau, (long)(dimX), (long)(dimY));
        }
    }
    else {
        /* running 3D diffusion iterations */
        for(i=0; i < iterationsNumb; i++) {
            /* Calculating weighted Laplacian */
            Weighted_Laplc3D(W_Lapl, Output, sigmaPar2, dimX, dimY, dimZ);
            /* Perform iteration step */
            Diffusion_update_step3D(Output, Input, W_Lapl, lambdaPar, sigmaPar2, tau, (long)(dimX), (long)(dimY), (long)(dimZ));
        }
    }
    free(W_Lapl);
    return *Output;
}
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
/* Guard so this unit is self-contained; the file-level definition wins. */
#ifndef EPS
#define EPS 1.0e-7
#endif
/*
 * Compute the anisotropy-weighted Laplacian W_Lapl = c^2*V_norm + c*V_orth
 * of the 2D image U0 (size dimX x dimY), using central differences with
 * symmetric (mirror) boundary conditions. sigma is the squared
 * edge-preserving parameter. Returns the first element (API compatibility).
 */
float Weighted_Laplc2D(float *W_Lapl, float *U0, float sigma, long dimX, long dimY)
{
    long i,j,i1,i2,j1,j2,index;
    float gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq;
#pragma omp parallel for shared(W_Lapl) private(i,j,i1,i2,j1,j2,index,gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq)
    for(i=0; i<dimX; i++) {
        /* symmetric boundary conditions */
        i1 = i+1; if (i1 == dimX) i1 = i-1;
        i2 = i-1; if (i2 < 0) i2 = i+1;
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions */
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            j2 = j-1; if (j2 < 0) j2 = j+1;
            index = j*dimX+i;
            /* first derivatives (central differences) */
            gradX = 0.5f*(U0[j*dimX+i2] - U0[j*dimX+i1]);
            /* x*x instead of pow(x,2): avoids a double-precision libm
             * call per pixel (performance, no third-party change) */
            gradX_sq = gradX*gradX;
            gradY = 0.5f*(U0[j2*dimX+i] - U0[j1*dimX+i]);
            gradY_sq = gradY*gradY;
            /* second derivatives */
            gradXX = U0[j*dimX+i2] + U0[j*dimX+i1] - 2*U0[index];
            gradYY = U0[j2*dimX+i] + U0[j1*dimX+i] - 2*U0[index];
            gradXY = 0.25f*(U0[j2*dimX+i2] + U0[j1*dimX+i1] - U0[j1*dimX+i2] - U0[j2*dimX+i1]);
            xy_2 = 2.0f*gradX*gradY*gradXY;
            denom = gradX_sq + gradY_sq;
            if (denom <= EPS) {
                /* near-zero gradient: regularize the division only */
                V_norm = (gradXX*gradX_sq + xy_2 + gradYY*gradY_sq)/EPS;
                V_orth = (gradXX*gradY_sq - xy_2 + gradYY*gradX_sq)/EPS;
            }
            else {
                V_norm = (gradXX*gradX_sq + xy_2 + gradYY*gradY_sq)/denom;
                V_orth = (gradXX*gradY_sq - xy_2 + gradYY*gradX_sq)/denom;
            }
            /* edge-stopping weight */
            c = 1.0f/(1.0f + denom/sigma);
            c_sq = c*c;
            W_Lapl[index] = c_sq*V_norm + c*V_orth;
        }
    }
    return *W_Lapl;
}
/*
 * One explicit time step of the 2D fourth-order scheme:
 * Output += tau * ( -lambda * Laplacian(W_Lapl) - (Output - Input) ),
 * with symmetric (mirror) boundary conditions. sigmaPar2 is unused here
 * but kept for signature parity with the 3D variant.
 * Returns the first element of Output (API compatibility).
 */
float Diffusion_update_step2D(float *Output, float *Input, float *W_Lapl, float lambdaPar, float sigmaPar2, float tau, long dimX, long dimY)
{
    long i,j,i1,i2,j1,j2,index;
    float gradXXc, gradYYc;
#pragma omp parallel for shared(Output, Input, W_Lapl) private(i,j,i1,i2,j1,j2,index,gradXXc,gradYYc)
    for(i=0; i<dimX; i++) {
        /* mirror the stencil at the image border */
        i1 = (i+1 == dimX) ? i-1 : i+1;
        i2 = (i == 0) ? i+1 : i-1;
        for(j=0; j<dimY; j++) {
            j1 = (j+1 == dimY) ? j-1 : j+1;
            j2 = (j == 0) ? j+1 : j-1;
            index = j*dimX+i;
            /* Laplacian of the weighted-Laplacian field */
            gradXXc = W_Lapl[j*dimX+i2] + W_Lapl[j*dimX+i1] - 2*W_Lapl[index];
            gradYYc = W_Lapl[j2*dimX+i] + W_Lapl[j1*dimX+i] - 2*W_Lapl[index];
            Output[index] += tau*(-lambdaPar*(gradXXc + gradYYc) - (Output[index] - Input[index]));
        }
    }
    return *Output;
}
/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
/* 3D analogue of Weighted_Laplc2D: build the weighted-Laplacian volume
 * W_Lapl (dimX x dimY x dimZ, same layout as U0) combining the
 * second-derivative responses along (V_norm) and across (V_orth) the local
 * gradient, weighted by the diffusivity c = 1/(1 + |grad|^2/sigma):
 *     W_Lapl = c^2 * V_norm + c * V_orth
 * Boundaries use symmetric (mirror) indexing.  The float return value only
 * mirrors the other kernels' signatures and carries no information.
 */
float Weighted_Laplc3D(float *W_Lapl, float *U0, float sigma, long dimX, long dimY, long dimZ)
{
    long i,j,k,i1,i2,j1,j2,k1,k2,index;
    float gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq, gradZ, gradZ_sq, gradZZ, gradXZ, gradYZ, xyz_1, xyz_2;
#pragma omp parallel for shared(W_Lapl) private(i,j,k,i1,i2,j1,j2,k1,k2,index,gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq, gradZ, gradZ_sq, gradZZ, gradXZ, gradYZ, xyz_1, xyz_2)
    for(i=0; i<dimX; i++) {
        /* symmetric boundary conditions in x */
        i1 = i+1; if (i1 == dimX) i1 = i-1;
        i2 = i-1; if (i2 < 0) i2 = i+1;
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions in y */
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            j2 = j-1; if (j2 < 0) j2 = j+1;
            for(k=0; k<dimZ; k++) {
                /* symmetric boundary conditions in z */
                k1 = k+1; if (k1 == dimZ) k1 = k-1;
                k2 = k-1; if (k2 < 0) k2 = k+1;
                index = (dimX*dimY)*k + j*dimX+i;

                /* central first derivatives; squared with a plain multiply —
                   pow(x,2) is a double-precision libm call and much slower */
                gradX = 0.5f*(U0[(dimX*dimY)*k + j*dimX+i2] - U0[(dimX*dimY)*k + j*dimX+i1]);
                gradX_sq = gradX*gradX;
                gradY = 0.5f*(U0[(dimX*dimY)*k + j2*dimX+i] - U0[(dimX*dimY)*k + j1*dimX+i]);
                gradY_sq = gradY*gradY;
                gradZ = 0.5f*(U0[(dimX*dimY)*k2 + j*dimX+i] - U0[(dimX*dimY)*k1 + j*dimX+i]);
                gradZ_sq = gradZ*gradZ;

                /* central second derivatives and the mixed derivatives */
                gradXX = U0[(dimX*dimY)*k + j*dimX+i2] + U0[(dimX*dimY)*k + j*dimX+i1] - 2*U0[index];
                gradYY = U0[(dimX*dimY)*k + j2*dimX+i] + U0[(dimX*dimY)*k + j1*dimX+i] - 2*U0[index];
                gradZZ = U0[(dimX*dimY)*k2 + j*dimX+i] + U0[(dimX*dimY)*k1 + j*dimX+i] - 2*U0[index];
                gradXY = 0.25f*(U0[(dimX*dimY)*k + j2*dimX+i2] + U0[(dimX*dimY)*k + j1*dimX+i1] - U0[(dimX*dimY)*k + j1*dimX+i2] - U0[(dimX*dimY)*k + j2*dimX+i1]);
                gradXZ = 0.25f*(U0[(dimX*dimY)*k2 + j*dimX+i2] - U0[(dimX*dimY)*k2+j*dimX+i1] - U0[(dimX*dimY)*k1+j*dimX+i2] + U0[(dimX*dimY)*k1+j*dimX+i1]);
                gradYZ = 0.25f*(U0[(dimX*dimY)*k2 +j2*dimX+i] - U0[(dimX*dimY)*k2+j1*dimX+i] - U0[(dimX*dimY)*k1+j2*dimX+i] + U0[(dimX*dimY)*k1+j1*dimX+i]);
                xy_2  = 2.0f*gradX*gradY*gradXY;
                xyz_1 = 2.0f*gradX*gradZ*gradXZ;
                xyz_2 = 2.0f*gradY*gradZ*gradYZ;
                denom = gradX_sq + gradY_sq + gradZ_sq;

                /* directional second derivatives; EPS guards the division
                   when the gradient magnitude (denom) vanishes */
                if (denom <= EPS) {
                    V_norm = (gradXX*gradX_sq + gradYY*gradY_sq + gradZZ*gradZ_sq + xy_2 + xyz_1 + xyz_2)/EPS;
                    V_orth = ((gradY_sq + gradZ_sq)*gradXX + (gradX_sq + gradZ_sq)*gradYY + (gradX_sq + gradY_sq)*gradZZ - xy_2 - xyz_1 - xyz_2)/EPS;
                }
                else {
                    V_norm = (gradXX*gradX_sq + gradYY*gradY_sq + gradZZ*gradZ_sq + xy_2 + xyz_1 + xyz_2)/denom;
                    V_orth = ((gradY_sq + gradZ_sq)*gradXX + (gradX_sq + gradZ_sq)*gradYY + (gradX_sq + gradY_sq)*gradZZ - xy_2 - xyz_1 - xyz_2)/denom;
                }

                c = 1.0f/(1.0f + denom/sigma);
                c_sq = c*c;
                W_Lapl[index] = c_sq*V_norm + c*V_orth;
            }
        }
    }
    return *W_Lapl;
}
float Diffusion_update_step3D(float *Output, float *Input, float *W_Lapl, float lambdaPar, float sigmaPar2, float tau, long dimX, long dimY, long dimZ)
{
long i,j,i1,i2,j1,j2,index,k,k1,k2;
float gradXXc, gradYYc, gradZZc;
#pragma omp parallel for shared(Output, Input, W_Lapl) private(i,j,i1,i2,j1,j2,k,k1,k2,index,gradXXc,gradYYc,gradZZc)
for(i=0; i<dimX; i++) {
/* symmetric boundary conditions */
i1 = i+1; if (i1 == dimX) i1 = i-1;
i2 = i-1; if (i2 < 0) i2 = i+1;
for(j=0; j<dimY; j++) {
/* symmetric boundary conditions */
j1 = j+1; if (j1 == dimY) j1 = j-1;
j2 = j-1; if (j2 < 0) j2 = j+1;
for(k=0; k<dimZ; k++) {
/* symmetric boundary conditions */
k1 = k+1; if (k1 == dimZ) k1 = k-1;
k2 = k-1; if (k2 < 0) k2 = k+1;
index = (dimX*dimY)*k + j*dimX+i;
gradXXc = W_Lapl[(dimX*dimY)*k + j*dimX+i2] + W_Lapl[(dimX*dimY)*k + j*dimX+i1] - 2*W_Lapl[index];
gradYYc = W_Lapl[(dimX*dimY)*k + j2*dimX+i] + W_Lapl[(dimX*dimY)*k + j1*dimX+i] - 2*W_Lapl[index];
gradZZc = W_Lapl[(dimX*dimY)*k2 + j*dimX+i] + W_Lapl[(dimX*dimY)*k1 + j*dimX+i] - 2*W_Lapl[index];
Output[index] += tau*(-lambdaPar*(gradXXc + gradYYc + gradZZc) - (Output[index] - Input[index]));
}
}
}
return *Output;
}
|
is.c | /*************************************************************************
* *
* N A S P A R A L L E L B E N C H M A R K S 3.0 *
* *
* O p e n M P V E R S I O N *
* *
* I S *
* *
*************************************************************************
* *
* This benchmark is an OpenMP version of the NPB IS code. *
* It is described in NAS Technical Report 99-011. *
* *
* Permission to use, copy, distribute and modify this software *
* for any purpose with or without fee is hereby granted. We *
* request, however, that all derived work reference the NAS *
* Parallel Benchmarks 3.0. This software is provided "as is" *
* without express or implied warranty. *
* *
* Information on NPB 3.0, including the technical report, the *
* original specifications, source code, results and information *
* on how to submit new results, is available at: *
* *
* http://www.nas.nasa.gov/Software/NPB/ *
* *
* Send comments or suggestions to npb@nas.nasa.gov *
* *
* NAS Parallel Benchmarks Group *
* NASA Ames Research Center *
* Mail Stop: T27A-1 *
* Moffett Field, CA 94035-1000 *
* *
* E-mail: npb@nas.nasa.gov *
* Fax: (650) 604-3957 *
* *
*************************************************************************
* *
* Author: M. Yarrow *
* OpenMP version: *
* H. Jin *
* *
*************************************************************************/
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* Bucket sort is not used in OpenMP */
/* #define USE_BUCKETS */
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#define USE_HEAP
#endif
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
typedef int INT_TYPE;
/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */
/* copies of rank info */
int passed_verification;
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/************************************/
INT_TYPE key_array[SIZE_OF_BUFFERS],
key_buff1[MAX_KEY],
key_buff2[SIZE_OF_BUFFERS],
partial_verify_vals[TEST_ARRAY_SIZE];
/**********************/
/* Partial verif info */
/**********************/
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
test_rank_array[TEST_ARRAY_SIZE],
S_test_index_array[TEST_ARRAY_SIZE] =
{48427,17148,23627,62548,4431},
S_test_rank_array[TEST_ARRAY_SIZE] =
{0,18,346,64917,65463},
W_test_index_array[TEST_ARRAY_SIZE] =
{357773,934767,875723,898999,404505},
W_test_rank_array[TEST_ARRAY_SIZE] =
{1249,11698,1039987,1043896,1048018},
A_test_index_array[TEST_ARRAY_SIZE] =
{2112377,662041,5336171,3642833,4250760},
A_test_rank_array[TEST_ARRAY_SIZE] =
{104,17523,123928,8288932,8388264},
B_test_index_array[TEST_ARRAY_SIZE] =
{41869,812306,5102857,18232239,26860214},
B_test_rank_array[TEST_ARRAY_SIZE] =
{33422937,10244,59149,33135281,99},
C_test_index_array[TEST_ARRAY_SIZE] =
{44172927,72999161,74326391,129606274,21736814},
C_test_rank_array[TEST_ARRAY_SIZE] =
{61147,882988,266290,133997595,133525895};
/***********************/
/* function prototypes */
/***********************/
double randlc( double *X, double *A );
void full_verify( void );
void c_print_results( char *name,
char class,
int n1,
int n2,
int n3,
int niter,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
/* Generator state: KS flags first-call initialization of the exact powers
   2^-23, 2^-46, 2^23, 2^46.  All five are threadprivate so each OpenMP
   thread keeps an independent copy (initialized via copyin or lazily). */
static int KS=0;
static double R23, R46, T23, T46;
#pragma omp threadprivate(KS, R23, R46, T23, T46)

/*
 * randlc: portable linear congruential generator
 *
 *     x_{k+1} = a * x_k  (mod 2^46)
 *
 * X points to the current seed (an odd integer-valued double in (1, 2^46))
 * and is updated in place; A points to the multiplier 'a'.  Returns
 * 2^-46 * x_{k+1}, a uniform deviate in (0, 1).  The 46-bit product is
 * formed exactly in double precision by splitting both operands into
 * 23-bit halves, so results are bit-identical on any machine with at
 * least 48 mantissa bits.
 *
 * Converted from the obsolete K&R (old-style) definition to an ANSI
 * prototype matching the declaration earlier in this file.
 */
double randlc( double *X, double *A )
{
      double T1, T2, T3, T4;
      double A1;
      double A2;
      double X1;
      double X2;
      double Z;
      int i, j;

/*  First call only: compute R23 = 2^-23, R46 = 2^-46, T23 = 2^23 and
    T46 = 2^46 by repeated halving/doubling.  Loops (rather than pow())
    guarantee the values are exact, assuming 0.5 is represented exactly. */
      if (KS == 0)
      {
        R23 = 1.0;
        R46 = 1.0;
        T23 = 1.0;
        T46 = 1.0;

        for (i=1; i<=23; i++)
        {
          R23 = 0.50 * R23;
          T23 = 2.0 * T23;
        }
        for (i=1; i<=46; i++)
        {
          R46 = 0.50 * R46;
          T46 = 2.0 * T46;
        }
        KS = 1;
      }

/*  Break A into two parts such that A = 2^23 * A1 + A2 and set X = N.  */
      T1 = R23 * *A;
      j  = T1;
      A1 = j;
      A2 = *A - T23 * A1;

/*  Break X into two parts such that X = 2^23 * X1 + X2, compute
    Z = A1 * X2 + A2 * X1  (mod 2^23), and then
    X = 2^23 * Z + A2 * X2  (mod 2^46).                            */
      T1 = R23 * *X;
      j  = T1;
      X1 = j;
      X2 = *X - T23 * X1;
      T1 = A1 * X2 + A2 * X1;

      j  = R23 * T1;
      T2 = j;
      Z = T1 - T23 * T2;
      T3 = T23 * Z + A2 * X2;
      j  = R46 * T3;
      T4 = j;
      *X = T3 - T46 * T4;

      return(R46 * *X);
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/* create_seq: fill key_array[begin..end) with pseudorandom keys.
 * Each key is the sum of four uniform deviates from randlc(), scaled so
 * every key lands in [0, MAX_KEY).  The caller passes the seed that
 * corresponds to position `begin` of the global random sequence.
 */
void create_seq( double seed, double a, int begin, int end )
{
    const int quarter_range = MAX_KEY/4;
    int idx;

    for (idx = begin; idx < end; idx++)
    {
        /* Sum of four deviates, each in (0,1).  Kept as separate
           statements because randlc() advances `seed` on every call,
           so the call order must stay fixed. */
        double x = randlc(&seed, &a);
        x += randlc(&seed, &a);
        x += randlc(&seed, &a);
        x += randlc(&seed, &a);

        key_array[idx] = quarter_range*x;
    }
}
/*****************************************************************/
/************ F I N D _ M Y _ S E E D ************/
/************ ************/
/************ returns parallel random number seq seed ************/
/*****************************************************************/
/*
* Create a random number sequence of total length nn residing
* on np number of processors. Each processor will therefore have a
* subsequence of length nn/np. This routine returns that random
* number which is the first random number for the subsequence belonging
* to processor rank kn, and which is used as seed for proc kn ran # gen.
*/
double find_my_seed( long kn,   /* my processor rank, 0<=kn<=num procs */
                     long np,   /* np = num procs */
                     long nn,   /* total num of ran numbers, all procs */
                     double s,  /* Ran num seed, for ex.: 314159265.00 */
                     double a ) /* Ran num gen mult, try 1220703125.00 */
{
    double t1,t2;
    long mq,nq,kk,ik;

    /* Processor 0 starts at the global seed unchanged. */
    if ( kn == 0 ) return s;

    /* Each processor owns mq*4 values of the sequence (nn/4 keys, 4
       deviates per key, rounded up per processor); processor kn must
       therefore skip the first nq = mq*4*kn values. */
    mq = (nn/4 + np - 1) / np;
    nq = mq * 4 * kn; /* number of rans to be skipped */

    t1 = s;
    t2 = a;
    kk = nq;
    /* Advance the seed t1 by nq LCG steps in O(log nq): squaring the
       multiplier t2 (t2 <- t2*t2 mod 2^46) doubles the stride, so this
       is the usual square-and-multiply walk over the bits of nq.  The
       exact order of randlc() calls matters — do not reorder. */
    while ( kk > 1 ) {
        ik = kk / 2;
        if( 2 * ik == kk ) {
            /* kk even: square the multiplier, halve the remaining count */
            (void)randlc( &t2, &t2 );
            kk = ik;
        }
        else {
            /* kk odd: apply the current multiplier to the seed once */
            (void)randlc( &t1, &t2 );
            kk = kk - 1;
        }
    }
    /* One final step for the remaining kk == 1. */
    (void)randlc( &t1, &t2 );

    return( t1 );
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/* full_verify: sort the keys using the rank table left in
   key_buff_ptr_global by the last rank() call, then confirm the result
   is nondecreasing.  Bumps the global passed_verification on success. */
void full_verify()
{
    INT_TYPE i, j;
    INT_TYPE k;

/*  Now, finally, sort the keys:  */
#ifdef SERIAL_SORT
/*  Copy keys into work array; keys in key_array will be reassigned. */
#pragma omp parallel for private(i)
    for( i=0; i<NUM_KEYS; i++ )
        key_buff2[i] = key_array[i];

/*  This is actual sorting */
    /* Counting-sort scatter: key_buff_ptr_global[v] is the cumulative
       count for value v; the predecrement converts counts into final
       positions.  NOTE: this destroys the rank table, so it is
       single-use. */
    for( i=0; i<NUM_KEYS; i++ )
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];

#else /*SERIAL_SORT*/
/*  Memory sorting can be done directly */
    /* Every key value k occupies the span
       [key_buff_ptr_global[k-1], key_buff_ptr_global[k]) of the sorted
       array; the spans are disjoint, so the loop parallelizes over k. */
#pragma omp parallel for private(i,k)
    for( k=0; k<MAX_KEY; k++ ) {
        i = (k==0)? 0 : key_buff_ptr_global[k-1];
        while ( i<key_buff_ptr_global[k] )
            key_array[i++] = k;
    }
#endif /*SERIAL_SORT*/

/*  Confirm keys correctly sorted: count incorrectly sorted keys, if any */
    j = 0;
#pragma omp parallel for private(i) reduction(+:j)
    for( i=1; i<NUM_KEYS; i++ )
        if( key_array[i-1] > key_array[i] )
            j++;

    if( j != 0 )
        printf( "Full_verify: number of keys out of sort: %d\n", j );
    else
        passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/* rank: one ranking iteration.  Builds a per-thread histogram of all keys,
 * converts it to cumulative ranks, merges the per-thread tables into the
 * global rank table key_buff1[], then partially verifies a handful of
 * keys with known ranks for the current CLASS.
 *
 * Fixes vs. the original:
 *  - work_buff is allocated with sizeof(INT_TYPE), not sizeof(int); the
 *    two only coincide while INT_TYPE stays int, and the typedef comment
 *    above explicitly invites changing it to long.
 *  - the malloc NULL-check only tests work_buff when it was actually
 *    malloc'd (in the non-USE_HEAP build it is a stack array and the
 *    test was always false).
 */
void rank( int iteration )
{
    INT_TYPE i, k;
    INT_TYPE *key_buff_ptr, *key_buff_ptr2;

    /* Plant two iteration-dependent keys so successive iterations
       produce slightly different (but predictable) rank tables. */
    key_array[iteration] = iteration;
    key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;

/*  Determine where the partial verify test keys are, load into  */
/*  top of array bucket_size                                     */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        partial_verify_vals[i] = key_array[test_index_array[i]];

    key_buff_ptr2 = key_array;
    key_buff_ptr = key_buff1;

#pragma omp parallel default(shared) private(i) shared(key_buff_ptr,key_buff_ptr2)
  {
/*  There seems problem to allocate big array on stack, so we try malloc
    which should allocate space on heap. */
#ifdef USE_HEAP
      INT_TYPE *work_buff;
#else
      INT_TYPE work_buff[MAX_KEY];
#endif
      int num_threads, my_num, thread_num, num_per_thread;
      int *begin, *end;

/* calculate bounds: split [0, MAX_KEY) into one contiguous chunk per thread */
      num_threads = omp_get_num_threads();
      my_num = omp_get_thread_num();
      num_per_thread = (MAX_KEY + num_threads - 1)/num_threads;
      begin = (int *)malloc(sizeof(int)*num_threads);
      end = (int *)malloc(sizeof(int)*num_threads);
#ifdef USE_HEAP
      work_buff = (INT_TYPE *)malloc(sizeof(INT_TYPE)*MAX_KEY);
      if (!begin || !end || !work_buff)
#else
      /* work_buff lives on the stack here, so only the two mallocs can fail */
      if (!begin || !end)
#endif
      {
          perror("malloc");
          exit(1);
      }
      for ( i=0; i < num_threads; i++ ) {
          begin[i] = num_per_thread * i;
          end[i] = begin[i] + num_per_thread;
          if (end[i] > MAX_KEY) end[i] = MAX_KEY;
      }

/*  Clear the work array */
      for( i=0; i<MAX_KEY; i++ )
          work_buff[i] = 0;

/*  Ranking of all keys occurs in this section:  */

/*  In this section, the keys themselves are used as their
    own indexes to determine how many of each there are: their
    individual population  */
#pragma omp for nowait
      for( i=0; i<NUM_KEYS; i++ )
          work_buff[key_buff_ptr2[i]]++;  /* Now they have individual key */
                                          /* population (per thread)      */

/*  To obtain ranks of each key, successively add the individual key
    population, not forgetting to add m, the total of lesser keys,
    to the first key population  */
      for( i=0; i<MAX_KEY-1; i++ )
          work_buff[i+1] += work_buff[i];

/* Accumulate the global histogram.  Round r: every thread writes/adds
   its private cumulative counts into a different chunk of key_buff_ptr,
   rotating chunk ownership between barriers so no two threads ever
   touch the same chunk in the same round. */
      for( i=begin[my_num]; i<end[my_num]; i++ )
          key_buff_ptr[i] = work_buff[i];
      for ( thread_num = 1; thread_num < num_threads; thread_num++ ) {
          my_num++;
          if (my_num >= num_threads) my_num = 0;
#pragma omp barrier
          for( i=begin[my_num]; i<end[my_num]; i++ )
              key_buff_ptr[i] += work_buff[i];
      }

      free(begin);
      free(end);
#ifdef USE_HEAP
      free(work_buff);
#endif
  }

/* This is the partial verify test section */
/* Observe that test_rank_array vals are   */
/* shifted differently for different cases */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
    {
        k = partial_verify_vals[i]; /* test vals were put here */
        if( 0 <= k && k <= NUM_KEYS-1 )
            switch( CLASS )
            {
                case 'S':
                    if( i <= 2 )
                    {
                        if( key_buff_ptr[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff_ptr[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'W':
                    if( i < 2 )
                    {
                        if( key_buff_ptr[k-1] !=
                            test_rank_array[i]+(iteration-2) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff_ptr[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'A':
                    if( i <= 2 )
                    {
                        if( key_buff_ptr[k-1] !=
                            test_rank_array[i]+(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff_ptr[k-1] !=
                            test_rank_array[i]-(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'B':
                    if( i == 1 || i == 2 || i == 4 )
                    {
                        if( key_buff_ptr[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff_ptr[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'C':
                    if( i <= 2 )
                    {
                        if( key_buff_ptr[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff_ptr[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
            }
    }

/*  Make copies of rank info for use by full_verify: these variables
    in rank are local; making them global slows down the code, probably
    since they cannot be made register by compiler  */

    if( iteration == MAX_ITERATIONS )
        key_buff_ptr_global = key_buff_ptr;

}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/* Driver: generate keys, run one untimed warm-up plus MAX_ITERATIONS timed
 * ranking passes, fully verify the final ordering, and print the report.
 *
 * Converted from the pre-C99 implicit-int K&R definition to a standard
 * ANSI `int main`, and an explicit return value was added.
 */
int main( int argc, char **argv )
{
    int i, iteration, timer_on;
    double timecounter;
    FILE *fp;

/*  Initialize timers: the presence of a "timer.flag" file enables the
    optional section timers 1..3 in addition to the benchmark timer 0. */
    timer_on = 0;
    if ((fp = fopen("timer.flag", "r")) != NULL) {
        fclose(fp);
        timer_on = 1;
    }
    timer_clear( 0 );
    if (timer_on) {
        timer_clear( 1 );
        timer_clear( 2 );
        timer_clear( 3 );
    }

    if (timer_on) timer_start( 3 );

/*  Initialize the verification arrays if a valid class */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        switch( CLASS )
        {
            case 'S':
                test_index_array[i] = S_test_index_array[i];
                test_rank_array[i] = S_test_rank_array[i];
                break;
            case 'A':
                test_index_array[i] = A_test_index_array[i];
                test_rank_array[i] = A_test_rank_array[i];
                break;
            case 'W':
                test_index_array[i] = W_test_index_array[i];
                test_rank_array[i] = W_test_rank_array[i];
                break;
            case 'B':
                test_index_array[i] = B_test_index_array[i];
                test_rank_array[i] = B_test_rank_array[i];
                break;
            case 'C':
                test_index_array[i] = C_test_index_array[i];
                test_rank_array[i] = C_test_rank_array[i];
                break;
        };

/*  Printout initial NPB info */
    printf
        ( "\n\n NAS Parallel Benchmarks (NPB3.0-OMP) - IS Benchmark\n\n" );
    printf( " Size: %d (class %c)\n", TOTAL_KEYS, CLASS );
    printf( " Iterations: %d\n", MAX_ITERATIONS );
    printf( " Number of active threads: %d\n\n", omp_get_max_threads() );

    if (timer_on) timer_start( 1 );

/*  Generate random number sequence and subsequent keys on all procs.
    copyin(KS) seeds each thread's threadprivate randlc() state from the
    master so every thread initializes its own 2^-23/2^-46 powers. */
#pragma omp parallel default(shared) copyin(KS)
  {
    int num_threads, my_num, begin, end, num_per_thread;

    /* Each thread fills its own contiguous slice of key_array. */
    num_threads = omp_get_num_threads();
    my_num = omp_get_thread_num();
    num_per_thread = (TOTAL_KEYS + num_threads - 1)/num_threads;
    begin = my_num * num_per_thread;
    end = begin + num_per_thread;
    if (end > TOTAL_KEYS) end = TOTAL_KEYS;

    create_seq( find_my_seed( my_num,
                              num_threads,
                              4*TOTAL_KEYS,
                              314159265.00, /* Random number gen seed */
                              1220703125.00 ), /* Random number gen mult */
                1220703125.00, /* Random number gen mult */
                begin, end);
  }
    if (timer_on) timer_stop( 1 );

/*  Do one iteration for free (i.e., untimed) to guarantee initialization of
    all data and code pages and respective tables */
    rank( 1 );

/*  Start verification counter */
    passed_verification = 0;

    if( CLASS != 'S' ) printf( "\n iteration\n" );

/*  Start timer */
    timer_start( 0 );

/*  This is the main iteration */
    for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
    {
        if( CLASS != 'S' ) printf( " %d\n", iteration );
        rank( iteration );
    }

/*  End of timing, obtain maximum time of all processors */
    timer_stop( 0 );
    timecounter = timer_read( 0 );

/*  This tests that keys are in sequence: sorting of last ranked key seq
    occurs here, but is an untimed operation */
    if (timer_on) timer_start( 2 );
    full_verify();
    if (timer_on) timer_stop( 2 );

    if (timer_on) timer_stop( 3 );

/*  The final printout: 5 partial verifications per iteration plus one
    full verification must all have passed */
    if( passed_verification != 5*MAX_ITERATIONS + 1 )
        passed_verification = 0;
    c_print_results( "IS",
                     CLASS,
                     TOTAL_KEYS,
                     0,
                     0,
                     MAX_ITERATIONS,
                     timecounter,
                     ((double) (MAX_ITERATIONS*TOTAL_KEYS))
                     /timecounter/1000000.,
                     "keys ranked",
                     passed_verification,
                     NPBVERSION,
                     COMPILETIME,
                     CC,
                     CLINK,
                     C_LIB,
                     C_INC,
                     CFLAGS,
                     CLINKFLAGS );

/*  Print additional timers */
    if (timer_on) {
        double t_total, t_percent;

        t_total = timer_read( 3 );
        printf("\nAdditional timers -\n");
        printf(" Total execution: %8.3f\n", t_total);
        if (t_total == 0.0) t_total = 1.0;
        timecounter = timer_read(1);
        t_percent = timecounter/t_total * 100.;
        printf(" Initialization : %8.3f (%5.2f%%)\n", timecounter, t_percent);
        timecounter = timer_read(0);
        t_percent = timecounter/t_total * 100.;
        printf(" Benchmarking : %8.3f (%5.2f%%)\n", timecounter, t_percent);
        timecounter = timer_read(2);
        t_percent = timecounter/t_total * 100.;
        printf(" Sorting : %8.3f (%5.2f%%)\n", timecounter, t_percent);
    }

    return 0;
/**************************/
} /* E N D P R O G R A M */
/**************************/
|
GB_unop__identity_uint32_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_uint16)
// op(A') function: GB (_unop_tran__identity_uint32_uint16)
// C type: uint32_t
// A type: uint16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op entrywise: Cx [p] = (uint32_t) Ax [p].
// Cx and Ax may be aliased because the operation is purely elementwise.
GrB_Info GB (_unop_apply__identity_uint32_uint16)
(
    uint32_t *Cx, // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap entry is absent
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose, typecast, and apply the identity op: C = (uint32_t) A'.
// The actual loops live in the shared GB_unop_transpose.c template,
// specialized by the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__identity_uint32_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_int32_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_fp64
// op(A') function: GB_unop_tran__identity_int32_fp64
// C type: int32_t
// A type: double
// cast: int32_t cij = GB_cast_to_int32_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op entrywise with typecast:
// Cx [p] = GB_cast_to_int32_t ((double) Ax [p]).  Range/NaN handling is
// delegated to GB_cast_to_int32_t.  Cx and Ax may be aliased because the
// operation is purely elementwise.
GrB_Info GB_unop_apply__identity_int32_fp64
(
    int32_t *Cx, // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose, typecast, and apply the identity op: C = cast (A').
// GB_PHASE_2_OF_2 selects the second phase of the shared
// GB_unop_transpose.c template, which is specialized by the GB_* macros
// defined above in this file.
GrB_Info GB_unop_tran__identity_int32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
binarytrees3.c | // The Computer Language Benchmarks Game
// https://salsa.debian.org/benchmarksgame-team/benchmarksgame/
//
// Contributed by Jeremy Zerfas
// Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho.
// *reset*
// This controls the width of lines that are output by this program.
#define MAXIMUM_LINE_WIDTH 60
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
typedef off_t off64_t; // This is needed to keep APR happy on 32 bit systems.
#include <apr_pools.h>
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
typedef struct tree_node{
struct tree_node * left_Node, * right_Node;
} tree_node;
// Create a binary tree of depth tree_Depth in memory_Pool and return a
// pointer to the created binary tree.
static inline tree_node * create_Tree(const intnative_t tree_Depth,
  apr_pool_t * const memory_Pool){
    // Allocate this node from the pool, then build its subtrees.
    tree_node * const node=apr_palloc(memory_Pool, sizeof(tree_node));

    if(tree_Depth>0){
        // Interior node: recurse one level down on each side.
        node->left_Node=create_Tree(tree_Depth-1, memory_Pool);
        node->right_Node=create_Tree(tree_Depth-1, memory_Pool);
    }else{
        // Leaf node: no children.
        node->left_Node=NULL;
        node->right_Node=NULL;
    }

    return node;
}
// Compute and return the checksum for the binary tree that has root_Node as the
// root node.
static inline intnative_t compute_Tree_Checksum(
  const tree_node * const root_Node){
    // A leaf contributes 1.  Trees here are perfect, so checking only
    // left_Node is enough to detect a leaf.
    if(!root_Node->left_Node)
        return 1;

    // Interior node: 1 for this node plus both subtree checksums.
    return compute_Tree_Checksum(root_Node->left_Node)+
      compute_Tree_Checksum(root_Node->right_Node)+1;
}
int main(int argc, char ** argv){
// Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of what
// was specified as the argument to the program and minimum_Tree_Depth+2.
const intnative_t minimum_Tree_Depth=4;
intnative_t maximum_Tree_Depth=atoi(argv[1]);
if(maximum_Tree_Depth < minimum_Tree_Depth+2)
maximum_Tree_Depth=minimum_Tree_Depth+2;
apr_initialize();
apr_pool_t * memory_Pool;
// Create a memory pool, create a binary tree of depth maximum_Tree_Depth+1,
// compute the checksum of the binary tree, print the statistics, and then
// delete the memory pool.
apr_pool_create_unmanaged(&memory_Pool);
tree_node * stretch_Tree=create_Tree(maximum_Tree_Depth+1, memory_Pool);
printf("stretch tree of depth %jd\t check: %jd\n",
(intmax_t)maximum_Tree_Depth+1,
(intmax_t)compute_Tree_Checksum(stretch_Tree));
apr_pool_destroy(memory_Pool);
// Create a memory pool and then create a long-lived binary tree of depth
// maximum_Tree_Depth which will be left alone for a while while
// more binary trees get allocated and deallocaited as required by the
// rules. We'll finish working with this later.
apr_pool_create_unmanaged(&memory_Pool);
tree_node * long_Lived_Tree=create_Tree(maximum_Tree_Depth, memory_Pool);
// Create a lot of binary trees in parallel of depths ranging from
// minimum_Tree_Depth to maximum_Tree_Depth, compute and tally up all their
// checksums, destroy the trees, and then record the statistics to
// output_Buffer[] so they can be displayed in order later.
char output_Buffer[maximum_Tree_Depth+1][MAXIMUM_LINE_WIDTH+1];
intnative_t current_Tree_Depth;
#pragma omp parallel for
for(current_Tree_Depth=minimum_Tree_Depth;
current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2){
intnative_t iterations=1<<(maximum_Tree_Depth-current_Tree_Depth+
minimum_Tree_Depth);
// Create a memory pool for this thread to use.
apr_pool_t * thread_Memory_Pool;
apr_pool_create_unmanaged(&thread_Memory_Pool);
intnative_t i=1, total_Trees_Checksum=0;
for(; i<=iterations; ++i){
// Create a binary tree of depth current_Tree_Depth
tree_node * const tree_1=create_Tree(current_Tree_Depth,
thread_Memory_Pool);
total_Trees_Checksum+=compute_Tree_Checksum(tree_1);
apr_pool_clear(thread_Memory_Pool);
}
apr_pool_destroy(thread_Memory_Pool);
// Record the statistics for the trees of depth current_Tree_Depth.
sprintf(output_Buffer[current_Tree_Depth],
"%jd\t trees of depth %jd\t check: %jd\n", (intmax_t)iterations,
(intmax_t)current_Tree_Depth, (intmax_t)total_Trees_Checksum);
}
// Print the statistics for all of the various tree depths.
for(current_Tree_Depth=minimum_Tree_Depth;
current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2)
printf("%s", output_Buffer[current_Tree_Depth]);
// Compute the checksum of the long-lived binary tree that we created
// earlier, print the statistics, and then delete the memory pool.
printf("long lived tree of depth %jd\t check: %jd\n",
(intmax_t)maximum_Tree_Depth,
(intmax_t)compute_Tree_Checksum(long_Lived_Tree));
apr_pool_destroy(memory_Pool);
apr_terminate();
return 0;
}
|
mg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - MG
This benchmark is an OpenMP C version of the NPB MG code.
  The OpenMP C versions are derived by RWCP from the serial Fortran versions
  in "NPB 2.3-serial" developed by NAS. The 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: E. Barszcz
P. Frederickson
A. Woo
M. Yarrow
OpenMP C version: S. Satoh
3.0 structure translation: F. Conti
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
#include "globals.h"
#include "../math/nas_math.h"
#include "../paging_benchmark.h"
#include <nautilus/nautilus.h>
#include <nautilus/shell.h>
/* parameters */
#define T_BENCH 1
#define T_INIT 2
/* global variables */
/* common /grid/ */
static int is1, is2, is3, ie1, ie2, ie3;
/* functions prototypes */
static void setup(int *n1, int *n2, int *n3, int lt);
static void mg3P(double ****u, double ***v, double ****r, double a[4],
double c[4], int n1, int n2, int n3, int k);
static void psinv( double ***r, double ***u, int n1, int n2, int n3,
double c[4], int k);
static void resid( double ***u, double ***v, double ***r,
int n1, int n2, int n3, double a[4], int k );
static void rprj3( double ***r, int m1k, int m2k, int m3k,
double ***s, int m1j, int m2j, int m3j, int k );
static void interp( double ***z, int mm1, int mm2, int mm3,
double ***u, int n1, int n2, int n3, int k );
static void norm2u3(double ***r, int n1, int n2, int n3,
double *rnm2, double *rnmu, int nx, int ny, int nz);
static void rep_nrm(double ***u, int n1, int n2, int n3,
char *title, int kk);
static void comm3(double ***u, int n1, int n2, int n3, int kk);
static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k);
static void showall(double ***z, int n1, int n2, int n3);
static double power( double a, int n );
static void bubble( double ten[M][2], int j1[M][2], int j2[M][2],
int j3[M][2], int m, int ind );
static void zero3(double ***z, int n1, int n2, int n3);
static void nonzero(double ***z, int n1, int n2, int n3);
/*--------------------------------------------------------------------
program mg
c-------------------------------------------------------------------*/
/* Forward declaration for the shell command table below. */
static int program_MG(char *_buf, void* _priv);
/* Shell registration: exposes the MG benchmark as the "nas-mg" command. */
static struct shell_cmd_impl nas_mg_impl = {
    .cmd = "nas-mg",
    .help_str = "NAS parallel benchmark MG",
    .handler = program_MG,
};
nk_register_shell_cmd(nas_mg_impl);
#ifdef NAUT_CONFIG_ASPACE_PAGING
int program_MG_paging(char * _buf, void *_priv){
return paging_wrapper(_buf, _priv, &program_MG);
}
static struct shell_cmd_impl nas_mg_paging_impl = {
.cmd = "nas-mg-paging",
.help_str = "NAS parallel benchmark MG with paging",
.handler = program_MG_paging,
};
nk_register_shell_cmd(nas_mg_paging_impl);
#endif
/* Entry point for the "nas-mg" shell command.  Runs the NPB 3.0 MG
 * (multigrid) benchmark: configures the problem from compiled-in
 * defaults, allocates the grid hierarchy, performs nit V-cycles of
 * mg3P()/resid(), verifies the resulting residual L2 norm against the
 * published reference value for the detected class, and prints the
 * timing/MFLOPS summary via c_print_results().  Always returns 0
 * (shell handler convention); _buf and _priv are unused. */
int program_MG(char * _buf, void *_priv) {
    /*-------------------------------------------------------------------------
    c k is the current level. It is passed down through subroutine args
    c and is NOT global. it is the current iteration
    c------------------------------------------------------------------------*/
    int k, it;
    double t, tinit, mflops;
    int nthreads = 1;    /* overwritten below inside an omp parallel region */
    /*-------------------------------------------------------------------------
    c These arrays are in common because they are quite large
    c and probably shouldn't be allocated on the stack. They
    c are always passed as subroutine args.
    c------------------------------------------------------------------------*/
    double ****u, ***v, ****r;  /* u,r: one 3-D grid per level; v: finest rhs */
    double a[4], c[4];          /* residual (a) and smoother (c) coefficients */
    double rnm2, rnmu;          /* L2 and max norms returned by norm2u3() */
    double epsilon = 1.0e-8;    /* verification tolerance on the L2 norm */
    int n1, n2, n3, nit;
    double verify_value;
    boolean verified;
    int i, j, l;
    // FILE *fp;
    timer_clear(T_BENCH);
    timer_clear(T_INIT);
    timer_start(T_INIT);
    /*----------------------------------------------------------------------
    c Read in and broadcast input data
    c---------------------------------------------------------------------*/
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
        " - MG Benchmark\n\n");
    /* The original mg.input file-reading branch is kept below only for
     * reference; this port always uses the compiled-in defaults. */
    /* fp = fopen("mg.input", "r"); */
    /* if (fp != NULL) { */
    /* printf(" Reading from input file mg.input\n"); */
    /* fscanf(fp, "%d", &lt); */
    /* while(fgetc(fp) != '\n'); */
    /* fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]); */
    /* while(fgetc(fp) != '\n'); */
    /* fscanf(fp, "%d", &nit); */
    /* while(fgetc(fp) != '\n'); */
    /* for (i = 0; i <= 7; i++) { */
    /* fscanf(fp, "%d", &debug_vec[i]); */
    /* } */
    /* fclose(fp); */
    /* } else { */
    /* printf(" No input file. Using compiled defaults\n"); */
    lt = LT_DEFAULT;
    nit = NIT_DEFAULT;
    nx[lt] = NX_DEFAULT;
    ny[lt] = NY_DEFAULT;
    nz[lt] = NZ_DEFAULT;
    for (i = 0; i <= 7; i++) {
        debug_vec[i] = DEBUG_DEFAULT;
    }
    // }
    /* Map the compiled-in problem size and iteration count onto the
     * official benchmark class letter; anything unrecognized is 'U'. */
    if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) {
        Class = 'U';
    } else if( nx[lt] == 32 && nit == 4 ) {
        Class = 'S';
    } else if( nx[lt] == 64 && nit == 40 ) {
        Class = 'W';
    } else if( nx[lt] == 256 && nit == 20 ) {
        Class = 'B';
    } else if( nx[lt] == 512 && nit == 20 ) {
        Class = 'C';
    } else if( nx[lt] == 256 && nit == 4 ) {
        Class = 'A';
    } else {
        Class = 'U';
    }
    /*--------------------------------------------------------------------
    c Use these for debug info:
    c---------------------------------------------------------------------
    c debug_vec(0) = 1 !=> report all norms
    c debug_vec(1) = 1 !=> some setup information
    c debug_vec(1) = 2 !=> more setup information
    c debug_vec(2) = k => at level k or below, show result of resid
    c debug_vec(3) = k => at level k or below, show result of psinv
    c debug_vec(4) = k => at level k or below, show result of rprj
    c debug_vec(5) = k => at level k or below, show result of interp
    c debug_vec(6) = 1 => (unused)
    c debug_vec(7) = 1 => (unused)
    c-------------------------------------------------------------------*/
    a[0] = -8.0/3.0;
    a[1] = 0.0;
    a[2] = 1.0/6.0;
    a[3] = 1.0/12.0;
    if (Class == 'A' || Class == 'S' || Class =='W') {
        /*--------------------------------------------------------------------
        c Coefficients for the S(a) smoother
        c-------------------------------------------------------------------*/
        c[0] = -3.0/8.0;
        c[1] = 1.0/32.0;
        c[2] = -1.0/64.0;
        c[3] = 0.0;
    } else {
        /*--------------------------------------------------------------------
        c Coefficients for the S(b) smoother
        c-------------------------------------------------------------------*/
        c[0] = -3.0/17.0;
        c[1] = 1.0/33.0;
        c[2] = -1.0/61.0;
        c[3] = 0.0;
    }
    lb = 1;
    setup(&n1,&n2,&n3,lt);
    /* Allocate the level hierarchy: u[l] and r[l] are m3[l] x m2[l] x m1[l]
     * grids for levels 1..lt; v is a single finest-level grid.
     * NOTE(review): none of these allocations are freed before return, so
     * repeated invocations of the shell command leak memory -- confirm this
     * is acceptable for a one-shot benchmark. */
    u = (double ****)malloc((lt+1)*sizeof(double ***));
    for (l = lt; l >=1; l--) {
        u[l] = (double ***)malloc(m3[l]*sizeof(double **));
        for (k = 0; k < m3[l]; k++) {
            u[l][k] = (double **)malloc(m2[l]*sizeof(double *));
            for (j = 0; j < m2[l]; j++) {
                u[l][k][j] = (double *)malloc(m1[l]*sizeof(double));
            }
        }
    }
    v = (double ***)malloc(m3[lt]*sizeof(double **));
    for (k = 0; k < m3[lt]; k++) {
        v[k] = (double **)malloc(m2[lt]*sizeof(double *));
        for (j = 0; j < m2[lt]; j++) {
            v[k][j] = (double *)malloc(m1[lt]*sizeof(double));
        }
    }
    r = (double ****)malloc((lt+1)*sizeof(double ***));
    for (l = lt; l >=1; l--) {
        r[l] = (double ***)malloc(m3[l]*sizeof(double **));
        for (k = 0; k < m3[l]; k++) {
            r[l][k] = (double **)malloc(m2[l]*sizeof(double *));
            for (j = 0; j < m2[l]; j++) {
                r[l][k][j] = (double *)malloc(m1[l]*sizeof(double));
            }
        }
    }
    zero3(u[lt],n1,n2,n3);
    zran3(v,n1,n2,n3,nx[lt],ny[lt],lt);   /* random +/-1 charges as the rhs */
    norm2u3(v,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);
    /* printf("\n norms of random v are\n");
    printf(" %4d%19.12e%19.12e\n", 0, rnm2, rnmu);
    printf(" about to evaluate resid, k= %d\n", lt);*/
    printf(" Size: %3dx%3dx%3d (class %1c)\n",
        nx[lt], ny[lt], nz[lt], Class);
    printf(" Iterations: %3d\n", nit);
    resid(u[lt],v,r[lt],n1,n2,n3,a,lt);
    norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);
    /*c---------------------------------------------------------------------
    c One iteration for startup
    c---------------------------------------------------------------------*/
    mg3P(u,v,r,a,c,n1,n2,n3,lt);
    resid(u[lt],v,r[lt],n1,n2,n3,a,lt);
    /* Re-initialize the problem so the timed section below starts from the
     * same state as before the untimed warm-up iteration above. */
    setup(&n1,&n2,&n3,lt);
    zero3(u[lt],n1,n2,n3);
    zran3(v,n1,n2,n3,nx[lt],ny[lt],lt);
    timer_stop(T_INIT);
    timer_start(T_BENCH);
    resid(u[lt],v,r[lt],n1,n2,n3,a,lt);
    norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);
    /* Timed section: nit V-cycles, each followed by a fresh residual. */
    for ( it = 1; it <= nit; it++) {
        mg3P(u,v,r,a,c,n1,n2,n3,lt);
        resid(u[lt],v,r[lt],n1,n2,n3,a,lt);
    }
    norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);
    /* Record the number of OpenMP threads used (stays 1 without OpenMP). */
#pragma omp parallel
    {
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    } /* end parallel */
    timer_stop(T_BENCH);
    t = timer_read(T_BENCH);
    tinit = timer_read(T_INIT);
    verified = FALSE;
    verify_value = 0.0;
    printf(" Initialization time: %15.3f seconds\n", tinit);
    printf(" Benchmark completed\n");
    /* Compare the final residual L2 norm with the published reference
     * value for the detected class. */
    if (Class != 'U') {
        if (Class == 'S') {
            verify_value = 0.530770700573e-04;
        } else if (Class == 'W') {
            verify_value = 0.250391406439e-17; /* 40 iterations*/
            /* 0.183103168997e-04 for 4 iterations */
        } else if (Class == 'A') {
            verify_value = 0.2433365309e-5;
        } else if (Class == 'B') {
            verify_value = 0.180056440132e-5;
        } else if (Class == 'C') {
            verify_value = 0.570674826298e-06;
        }
        if ( fabs( rnm2 - verify_value ) <= epsilon ) {
            verified = TRUE;
            printf(" VERIFICATION SUCCESSFUL\n");
            printf(" L2 Norm is %20.12e\n", rnm2);
            printf(" Error is %20.12e\n", rnm2 - verify_value);
        } else {
            verified = FALSE;
            printf(" VERIFICATION FAILED\n");
            printf(" L2 Norm is %20.12e\n", rnm2);
            printf(" The correct L2 Norm is %20.12e\n", verify_value);
        }
    } else {
        verified = FALSE;
        printf(" Problem size unknown\n");
        printf(" NO VERIFICATION PERFORMED\n");
    }
    /* MFLOPS estimate: ~58 flops per grid point per iteration. */
    if ( t != 0.0 ) {
        int nn = nx[lt]*ny[lt]*nz[lt];
        mflops = 58.*nit*nn*1.0e-6 / t;
    } else {
        mflops = 0.0;
    }
    c_print_results("MG", Class, nx[lt], ny[lt], nz[lt],
        nit, nthreads, t, mflops, " floating point",
        verified, NPBVERSION, COMPILETIME,
        CS1, CS2, CS3, CS4, CS5, CS6, CS7);
    return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/* setup: derive the per-level grid sizes from the finest level's
 * nx/ny/nz, set the global index bounds (is1..ie3), and return the
 * padded finest-level array extents (interior size + 2 boundary
 * layers) through n1/n2/n3. */
static void setup(int *n1, int *n2, int *n3, int lt) {
    /*--------------------------------------------------------------------
    c-------------------------------------------------------------------*/
    int k;
    /* Each coarser level halves every dimension of the level above it. */
    for ( k = lt-1; k >= 1; k--) {
        nx[k] = nx[k+1]/2;
        ny[k] = ny[k+1]/2;
        nz[k] = nz[k+1]/2;
    }
    for (k = 1; k <= lt; k++) {
        m1[k] = nx[k]+2;
        /* NOTE(review): m2 is derived from nz and m3 from ny.  This is
         * harmless for the supported classes (nx == ny == nz), but
         * confirm the intended axis mapping before ever supporting
         * non-cubic grids. */
        m2[k] = nz[k]+2;
        m3[k] = ny[k]+2;
    }
    is1 = 1;
    ie1 = nx[lt];
    *n1 = nx[lt]+2;
    is2 = 1;
    ie2 = ny[lt];
    *n2 = ny[lt]+2;
    is3 = 1;
    ie3 = nz[lt];
    *n3 = nz[lt]+2;
    if (debug_vec[1] >= 1 ) {
        printf(" in setup, \n");
        printf(" lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n");
        printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n",
            lt,nx[lt],ny[lt],nz[lt],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3);
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/* mg3P: one multigrid V-cycle.  Restricts the residual down to the
 * coarsest level (global lb), smooths there, then prolongates the
 * correction back up level by level, recomputing the residual and
 * smoothing at each step.  The incoming parameter k names the finest
 * level but is immediately reused as the level loop variable; the
 * globals lt/lb give the top and bottom levels. */
static void mg3P(double ****u, double ***v, double ****r, double a[4],
double c[4], int n1, int n2, int n3, int k) {
    /*--------------------------------------------------------------------
    c-------------------------------------------------------------------*/
    /*--------------------------------------------------------------------
    c multigrid V-cycle routine
    c-------------------------------------------------------------------*/
    int j;
    /*--------------------------------------------------------------------
    c down cycle.
    c restrict the residual from the fine grid to the coarse
    c-------------------------------------------------------------------*/
    for (k = lt; k >= lb+1; k--) {
        j = k-1;
        rprj3(r[k], m1[k], m2[k], m3[k],
            r[j], m1[j], m2[j], m3[j], k);
    }
    k = lb;
    /*--------------------------------------------------------------------
    c compute an approximate solution on the coarsest grid
    c-------------------------------------------------------------------*/
    zero3(u[k], m1[k], m2[k], m3[k]);
    psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k);
    for (k = lb+1; k <= lt-1; k++) {
        j = k-1;
        /*--------------------------------------------------------------------
        c prolongate from level k-1 to k
        c-------------------------------------------------------------------*/
        zero3(u[k], m1[k], m2[k], m3[k]);
        interp(u[j], m1[j], m2[j], m3[j],
            u[k], m1[k], m2[k], m3[k], k);
        /*--------------------------------------------------------------------
        c compute residual for level k
        c-------------------------------------------------------------------*/
        /* r[k] is passed as both the rhs and the result: the residual at
         * this level is updated in place (r = r - A u). */
        resid(u[k], r[k], r[k], m1[k], m2[k], m3[k], a, k);
        /*--------------------------------------------------------------------
        c apply smoother
        c-------------------------------------------------------------------*/
        psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k);
    }
    j = lt - 1;
    k = lt;
    /* Final prolongation to the finest level, then one residual against
     * the true rhs v followed by a smoothing pass. */
    interp(u[j], m1[j], m2[j], m3[j], u[lt], n1, n2, n3, k);
    resid(u[lt], v, r[lt], n1, n2, n3, a, k);
    psinv(r[lt], u[lt], n1, n2, n3, c, k);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void psinv( double ***r, double ***u, int n1, int n2, int n3,
double c[4], int k) {
    /*--------------------------------------------------------------------
    c-------------------------------------------------------------------*/
    /*--------------------------------------------------------------------
    c psinv applies an approximate inverse as smoother: u = u + Cr
    c
    c This implementation costs 15A + 4M per result, where
    c A and M denote the costs of Addition and Multiplication.
    c Presuming coefficient c(3) is zero (the NPB assumes this,
    c but it is thus not a general case), 2A + 1M may be eliminated,
    c resulting in 13A + 3M.
    c Note that this vectorizes, and is also fine for cache
    c based machines.
    c-------------------------------------------------------------------*/
    int i3, i2, i1;
    /* Per-thread scratch lines (private in the parallel loop) holding
     * partial stencil sums for one (i3,i2) row; M is the global maximum
     * line length from globals.h. */
    double r1[M], r2[M];
#pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2)
    for (i3 = 1; i3 < n3-1; i3++) {
        for (i2 = 1; i2 < n2-1; i2++) {
            /* First pass: accumulate the 4 face-neighbor (r1) and 4
             * edge-neighbor (r2) sums for the whole i1 line. */
            for (i1 = 0; i1 < n1; i1++) {
                r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]
                    + r[i3-1][i2][i1] + r[i3+1][i2][i1];
                r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]
                    + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
            }
            /* Second pass: apply the smoother to the interior points. */
            for (i1 = 1; i1 < n1-1; i1++) {
                u[i3][i2][i1] = u[i3][i2][i1]
                    + c[0] * r[i3][i2][i1]
                    + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]
                        + r1[i1] )
                    + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
                /*--------------------------------------------------------------------
                c Assume c(3) = 0 (Enable line below if c(3) not= 0)
                c---------------------------------------------------------------------
                c > + c(3) * ( r2(i1-1) + r2(i1+1) )
                c-------------------------------------------------------------------*/
            }
        }
    }
    /*--------------------------------------------------------------------
    c exchange boundary points
    c-------------------------------------------------------------------*/
    comm3(u,n1,n2,n3,k);
    if (debug_vec[0] >= 1 ) {
        rep_nrm(u,n1,n2,n3," psinv",k);
    }
    if ( debug_vec[3] >= k ) {
        showall(u,n1,n2,n3);
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void resid( double ***u, double ***v, double ***r,
int n1, int n2, int n3, double a[4], int k ) {
    /*--------------------------------------------------------------------
    c-------------------------------------------------------------------*/
    /*--------------------------------------------------------------------
    c resid computes the residual: r = v - Au
    c
    c This implementation costs 15A + 4M per result, where
    c A and M denote the costs of Addition (or Subtraction) and
    c Multiplication, respectively.
    c Presuming coefficient a(1) is zero (the NPB assumes this,
    c but it is thus not a general case), 3A + 1M may be eliminated,
    c resulting in 12A + 3M.
    c Note that this vectorizes, and is also fine for cache
    c based machines.
    c-------------------------------------------------------------------*/
    int i3, i2, i1;
    /* Per-thread scratch lines (private in the parallel loop): u1 holds
     * face-neighbor sums, u2 edge-neighbor sums for one (i3,i2) row. */
    double u1[M], u2[M];
#pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)
    for (i3 = 1; i3 < n3-1; i3++) {
        for (i2 = 1; i2 < n2-1; i2++) {
            /* First pass: precompute the neighbor sums for the whole line. */
            for (i1 = 0; i1 < n1; i1++) {
                u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]
                    + u[i3-1][i2][i1] + u[i3+1][i2][i1];
                u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]
                    + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
            }
            /* Second pass: form the residual on the interior points. */
            for (i1 = 1; i1 < n1-1; i1++) {
                r[i3][i2][i1] = v[i3][i2][i1]
                    - a[0] * u[i3][i2][i1]
                    /*--------------------------------------------------------------------
                    c Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)
                    c---------------------------------------------------------------------
                    c > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)
                    c > + u1(i1) )
                    c-------------------------------------------------------------------*/
                    - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )
                    - a[3] * ( u2[i1-1] + u2[i1+1] );
            }
        }
    }
    /*--------------------------------------------------------------------
    c exchange boundary data
    c--------------------------------------------------------------------*/
    comm3(r,n1,n2,n3,k);
    if (debug_vec[0] >= 1 ) {
        rep_nrm(r,n1,n2,n3," resid",k);
    }
    if ( debug_vec[2] >= k ) {
        showall(r,n1,n2,n3);
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void rprj3( double ***r, int m1k, int m2k, int m3k,
double ***s, int m1j, int m2j, int m3j, int k ) {
    /*--------------------------------------------------------------------
    c-------------------------------------------------------------------*/
    /*--------------------------------------------------------------------
    c rprj3 projects onto the next coarser grid,
    c using a trilinear Finite Element projection: s = r' = P r
    c
    c This implementation costs 20A + 4M per result, where
    c A and M denote the costs of Addition and Multiplication.
    c Note that this vectorizes, and is also fine for cache
    c based machines.
    c-------------------------------------------------------------------*/
    int j3, j2, j1, i3, i2, i1, d1, d2, d3;
    double x1[M], y1[M], x2, y2;
    /* d* = 2 only when the fine grid has collapsed to 3 points in that
     * dimension (the coarsest-grid case); otherwise 1. */
    if (m1k == 3) {
        d1 = 2;
    } else {
        d1 = 1;
    }
    if (m2k == 3) {
        d2 = 2;
    } else {
        d2 = 1;
    }
    if (m3k == 3) {
        d3 = 2;
    } else {
        d3 = 1;
    }
#pragma omp parallel for default(shared) private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)
    for (j3 = 1; j3 < m3j-1; j3++) {
        i3 = 2*j3-d3;
        /*C i3 = 2*j3-1*/
        for (j2 = 1; j2 < m2j-1; j2++) {
            i2 = 2*j2-d2;
            /*C i2 = 2*j2-1*/
            /* First pass intentionally runs one element further
             * (j1 < m1j, not m1j-1) so that x1/y1 are filled through
             * index i1+2 for use in the second pass below. */
            for (j1 = 1; j1 < m1j; j1++) {
                i1 = 2*j1-d1;
                /*C i1 = 2*j1-1*/
                x1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]
                    + r[i3][i2+1][i1] + r[i3+2][i2+1][i1];
                y1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]
                    + r[i3][i2+2][i1] + r[i3+2][i2+2][i1];
            }
            for (j1 = 1; j1 < m1j-1; j1++) {
                i1 = 2*j1-d1;
                /*C i1 = 2*j1-1*/
                y2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1]
                    + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1];
                x2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1]
                    + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1];
                /* Trilinear weights: 1/2 center, 1/4 faces, 1/8 edges,
                 * 1/16 corners. */
                s[j3][j2][j1] =
                    0.5 * r[i3+1][i2+1][i1+1]
                    + 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)
                    + 0.125 * ( x1[i1] + x1[i1+2] + y2)
                    + 0.0625 * ( y1[i1] + y1[i1+2] );
            }
        }
    }
    /* Refresh the coarse grid's wrap-around boundary layers. */
    comm3(s,m1j,m2j,m3j,k-1);
    if (debug_vec[0] >= 1 ) {
        rep_nrm(s,m1j,m2j,m3j," rprj3",k-1);
    }
    if (debug_vec[4] >= k ) {
        showall(s,m1j,m2j,m3j);
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void interp( double ***z, int mm1, int mm2, int mm3,
double ***u, int n1, int n2, int n3, int k ) {
    /*--------------------------------------------------------------------
    c-------------------------------------------------------------------*/
    /*--------------------------------------------------------------------
    c interp adds the trilinear interpolation of the correction
    c from the coarser grid to the current approximation: u = u + Qu'
    c
    c Observe that this implementation costs 16A + 4M, where
    c A and M denote the costs of Addition and Multiplication.
    c Note that this vectorizes, and is also fine for cache
    c based machines. Vector machines may get slightly better
    c performance however, with 8 separate "do i1" loops, rather than 4.
    c-------------------------------------------------------------------*/
    int i3, i2, i1, d1, d2, d3, t1, t2, t3;
    /*
    c note that m = 1037 in globals.h but for this only need to be
    c 535 to handle up to 1024^3
    c integer m
    c parameter( m=535 )
    */
    /* Per-thread scratch lines of 1-D partial sums shared across the
     * four update loops below. */
    double z1[M], z2[M], z3[M];
    /* General case: no dimension has collapsed to 3 points. */
    if ( n1 != 3 && n2 != 3 && n3 != 3 ) {
#pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)
        for (i3 = 0; i3 < mm3-1; i3++) {
            for (i2 = 0; i2 < mm2-1; i2++) {
                /* Precompute pairwise coarse-point sums for this line. */
                for (i1 = 0; i1 < mm1; i1++) {
                    z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];
                    z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];
                    z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];
                }
                /* Four loops cover the 8 fine points per coarse cell,
                 * weighted by how many coarse neighbors they average. */
                for (i1 = 0; i1 < mm1-1; i1++) {
                    u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]
                        +z[i3][i2][i1];
                    u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]
                        +0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);
                }
                for (i1 = 0; i1 < mm1-1; i1++) {
                    u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]
                        +0.5 * z1[i1];
                    u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]
                        +0.25*( z1[i1] + z1[i1+1] );
                }
                for (i1 = 0; i1 < mm1-1; i1++) {
                    u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]
                        +0.5 * z2[i1];
                    u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]
                        +0.25*( z2[i1] + z2[i1+1] );
                }
                for (i1 = 0; i1 < mm1-1; i1++) {
                    u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]
                        +0.25* z3[i1];
                    u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]
                        +0.125*( z3[i1] + z3[i1+1] );
                }
            }
        }
    } else {
        /* Degenerate case: some dimension has only 3 points.  d* gives
         * the starting offset and t* the shift for that axis. */
        if (n1 == 3) {
            d1 = 2;
            t1 = 1;
        } else {
            d1 = 1;
            t1 = 0;
        }
        if (n2 == 3) {
            d2 = 2;
            t2 = 1;
        } else {
            d2 = 1;
            t2 = 0;
        }
        if (n3 == 3) {
            d3 = 2;
            t3 = 1;
        } else {
            d3 = 1;
            t3 = 0;
        }
#pragma omp parallel default(shared) private(i1,i2,i3)
        {
#pragma omp for
            for ( i3 = d3; i3 <= mm3-1; i3++) {
                for ( i2 = d2; i2 <= mm2-1; i2++) {
                    for ( i1 = d1; i1 <= mm1-1; i1++) {
                        u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] =
                            u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1]
                            +z[i3-1][i2-1][i1-1];
                    }
                    for ( i1 = 1; i1 <= mm1-1; i1++) {
                        u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] =
                            u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1]
                            +0.5*(z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);
                    }
                }
                for ( i2 = 1; i2 <= mm2-1; i2++) {
                    for ( i1 = d1; i1 <= mm1-1; i1++) {
                        u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] =
                            u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1]
                            +0.5*(z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
                    }
                    for ( i1 = 1; i1 <= mm1-1; i1++) {
                        u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] =
                            u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1]
                            +0.25*(z[i3-1][i2][i1]+z[i3-1][i2-1][i1]
                            +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
                    }
                }
            }
#pragma omp for nowait
            for ( i3 = 1; i3 <= mm3-1; i3++) {
                for ( i2 = d2; i2 <= mm2-1; i2++) {
                    for ( i1 = d1; i1 <= mm1-1; i1++) {
                        u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] =
                            u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1]
                            +0.5*(z[i3][i2-1][i1-1]+z[i3-1][i2-1][i1-1]);
                    }
                    for ( i1 = 1; i1 <= mm1-1; i1++) {
                        u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] =
                            u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1]
                            +0.25*(z[i3][i2-1][i1]+z[i3][i2-1][i1-1]
                            +z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);
                    }
                }
                for ( i2 = 1; i2 <= mm2-1; i2++) {
                    for ( i1 = d1; i1 <= mm1-1; i1++) {
                        u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] =
                            u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1]
                            +0.25*(z[i3][i2][i1-1]+z[i3][i2-1][i1-1]
                            +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
                    }
                    for ( i1 = 1; i1 <= mm1-1; i1++) {
                        u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] =
                            u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1]
                            +0.125*(z[i3][i2][i1]+z[i3][i2-1][i1]
                            +z[i3][i2][i1-1]+z[i3][i2-1][i1-1]
                            +z[i3-1][i2][i1]+z[i3-1][i2-1][i1]
                            +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
                    }
                }
            }
        }
    }//end #pragma omp parallel
    if (debug_vec[0] >= 1 ) {
        rep_nrm(z,mm1,mm2,mm3,"z: inter",k-1);
        rep_nrm(u,n1,n2,n3,"u: inter",k);
    }
    if ( debug_vec[5] >= k ) {
        showall(z,mm1,mm2,mm3);
        showall(u,n1,n2,n3);
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void norm2u3(double ***r, int n1, int n2, int n3,
double *rnm2, double *rnmu, int nx, int ny, int nz) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c norm2u3 evaluates approximations to the L2 norm and the
c uniform (or L-infinity or Chebyshev) norm, under the
c assumption that the boundaries are periodic or zero. Add the
c boundaries in with half weight (quarter weight on the edges
c and eighth weight at the corners) for inhomogeneous boundaries.
c-------------------------------------------------------------------*/
double s = 0.0;
int i3, i2, i1, n;
double a = 0.0, tmp = 0.0;
n = nx*ny*nz;
#pragma omp parallel for default(shared) private(i1,i2,i3,a) reduction(+:s) reduction(max:tmp)
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 1; i1 < n1-1; i1++) {
s = s + r[i3][i2][i1] * r[i3][i2][i1];
a = fabs(r[i3][i2][i1]);
if (a > tmp) tmp = a;
}
}
}
*rnmu = tmp;
*rnm2 = sqrt(s/(double)n);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void rep_nrm(double ***u, int n1, int n2, int n3,
char *title, int kk) {
    /*--------------------------------------------------------------------
    c Debug helper: compute and print the L2 and uniform norms of grid u
    c at level kk, labeled with the caller-supplied title.  The interior
    c point counts come from the global per-level nx/ny/nz arrays.
    c-------------------------------------------------------------------*/
    double norm_L2, norm_Inf;
    norm2u3(u, n1, n2, n3, &norm_L2, &norm_Inf, nx[kk], ny[kk], nz[kk]);
    printf(" Level%2d in %8s: norms =%21.14e%21.14e\n",
        kk, title, norm_L2, norm_Inf);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void comm3(double ***u, int n1, int n2, int n3, int kk) {
    /*--------------------------------------------------------------------
    c-------------------------------------------------------------------*/
    /*--------------------------------------------------------------------
    c comm3 organizes the communication on all borders
    c
    c In this single-address-space version "communication" is just a
    c periodic wrap: each boundary layer is overwritten with a copy of
    c the opposite interior layer.  kk (the level) is unused here.
    c-------------------------------------------------------------------*/
    int i1, i2, i3;
    /* axis = 1 */
#pragma omp parallel default(shared) private(i1,i2,i3)
    {
#pragma omp for
        for ( i3 = 1; i3 < n3-1; i3++) {
            for ( i2 = 1; i2 < n2-1; i2++) {
                u[i3][i2][n1-1] = u[i3][i2][1];
                u[i3][i2][0] = u[i3][i2][n1-2];
            }
            /* The axis-2 update below was fused into this same i3 loop
             * (the commented-out pragma/loop shows the original split)
             * so one worksharing loop covers both axes. */
            // }
            /* axis = 2 */
            //#pragma omp for
            // for ( i3 = 1; i3 < n3-1; i3++) {
            for ( i1 = 0; i1 < n1; i1++) {
                u[i3][n2-1][i1] = u[i3][1][i1];
                u[i3][0][i1] = u[i3][n2-2][i1];
            }
        }
        /* axis = 3 */
#pragma omp for nowait
        for ( i2 = 0; i2 < n2; i2++) {
            for ( i1 = 0; i1 < n1; i1++) {
                u[n3-1][i2][i1] = u[1][i2][i1];
                u[0][i2][i1] = u[n3-2][i2][i1];
            }
        }
    }//end #pragma omp parallel
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c       zran3 loads +1 at ten randomly chosen points,
c       loads -1 at a different ten random points,
c       and zero elsewhere.
c-------------------------------------------------------------------*/
#define MM 10
    static double __A=1220703125.e0;
#define A __A
#define X 314159265.e0

    int i0, m0, m1;
    int i1, i2, i3, d1, e1, e2, e3;
    double xx, x0, x1, a1, a2, ai;
    double ten[MM][2], best;
    int i, j1[MM][2], j2[MM][2], j3[MM][2];
    int jg[4][MM][2];
    double rdummy;

    /* Stream jump multipliers: a1 advances the LCG by one grid row (nx
       points), a2 by one grid plane (nx*ny points). */
    a1 = power( A, nx );
    a2 = power( A, nx*ny );

    zero3(z,n1,n2,n3);

    /* Global linear index of this block's first point.  is1..is3 and
       ie1..ie3 are file-scope extents set elsewhere in the file --
       presumably by the grid setup; confirm before reuse. */
    i = is1-1+nx*(is2-1+ny*(is3-1));

    ai = power( A, i );
    d1 = ie1 - is1 + 1;
    e1 = ie1 - is1 + 2;  /* computed for symmetry; unused below */
    e2 = ie2 - is2 + 2;
    e3 = ie3 - is3 + 2;

    /* Fill the interior of z with pseudorandom numbers, one i1-row at a
       time: x0 tracks the plane seed, x1 the row seed. */
    x0 = X;
    rdummy = randlc( &x0, ai );
    for (i3 = 1; i3 < e3; i3++) {
        x1 = x0;
        for (i2 = 1; i2 < e2; i2++) {
            xx = x1;
            vranlc( d1, &xx, A, &(z[i3][i2][0]));
            rdummy = randlc( &x1, a1 );
        }
        rdummy = randlc( &x0, a2 );
    }

/*--------------------------------------------------------------------
c       call comm3(z,n1,n2,n3)
c       call showall(z,n1,n2,n3)
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c       each processor looks for twenty candidates
c-------------------------------------------------------------------*/
    /* ten[][1]/j*[][1] track the MM largest values, ten[][0]/j*[][0]
       the MM smallest; slot 0 holds the current worst candidate. */
    for (i = 0; i < MM; i++) {
        ten[i][1] = 0.0;
        j1[i][1] = 0;
        j2[i][1] = 0;
        j3[i][1] = 0;
        ten[i][0] = 1.0;
        j1[i][0] = 0;
        j2[i][0] = 0;
        j3[i][0] = 0;
    }

    for (i3 = 1; i3 < n3-1; i3++) {
        for (i2 = 1; i2 < n2-1; i2++) {
            for (i1 = 1; i1 < n1-1; i1++) {
                if ( z[i3][i2][i1] > ten[0][1] ) {
                    ten[0][1] = z[i3][i2][i1];
                    j1[0][1] = i1;
                    j2[0][1] = i2;
                    j3[0][1] = i3;
                    bubble( ten, j1, j2, j3, MM, 1 );
                }
                if ( z[i3][i2][i1] < ten[0][0] ) {
                    ten[0][0] = z[i3][i2][i1];
                    j1[0][0] = i1;
                    j2[0][0] = i2;
                    j3[0][0] = i3;
                    bubble( ten, j1, j2, j3, MM, 0 );
                }
            }
        }
    }

/*--------------------------------------------------------------------
c       Now which of these are globally best?
c-------------------------------------------------------------------*/
    /* NOTE(review): in this shared-memory version each comparison
       "best == z[...]" is trivially true (best was just read from the
       same element), so the else branches are dead.  The structure is
       kept from the distributed-memory reference code, where `best`
       came from a global reduction.  jg[0][i][*] records the owning
       processor rank -- always 0 here. */
    i1 = MM - 1;
    i0 = MM - 1;
    for (i = MM - 1 ; i >= 0; i--) {
        best = z[j3[i1][1]][j2[i1][1]][j1[i1][1]];
        if (best == z[j3[i1][1]][j2[i1][1]][j1[i1][1]]) {
            jg[0][i][1] = 0;
            jg[1][i][1] = is1 - 1 + j1[i1][1];
            jg[2][i][1] = is2 - 1 + j2[i1][1];
            jg[3][i][1] = is3 - 1 + j3[i1][1];
            i1 = i1-1;
        } else {
            jg[0][i][1] = 0;
            jg[1][i][1] = 0;
            jg[2][i][1] = 0;
            jg[3][i][1] = 0;
        }
        ten[i][1] = best;
        best = z[j3[i0][0]][j2[i0][0]][j1[i0][0]];
        if (best == z[j3[i0][0]][j2[i0][0]][j1[i0][0]]) {
            jg[0][i][0] = 0;
            jg[1][i][0] = is1 - 1 + j1[i0][0];
            jg[2][i][0] = is2 - 1 + j2[i0][0];
            jg[3][i][0] = is3 - 1 + j3[i0][0];
            i0 = i0-1;
        } else {
            jg[0][i][0] = 0;
            jg[1][i][0] = 0;
            jg[2][i][0] = 0;
            jg[3][i][0] = 0;
        }
        ten[i][0] = best;
    }
    /* First slot actually claimed by this process (0 here, see note). */
    m1 = i1+1;
    m0 = i0+1;

/*  printf(" negative charges at");
    for (i = 0; i < MM; i++) {
        if (i%5 == 0) printf("\n");
        printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]);
    }
    printf("\n positive charges at");
    for (i = 0; i < MM; i++) {
        if (i%5 == 0) printf("\n");
        printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]);
    }
    printf("\n small random numbers were\n");
    for (i = MM-1; i >= 0; i--) {
        printf(" %15.8e", ten[i][0]);
    }
    printf("\n and they were found on processor number\n");
    for (i = MM-1; i >= 0; i--) {
        printf(" %4d", jg[0][i][0]);
    }
    printf("\n large random numbers were\n");
    for (i = MM-1; i >= 0; i--) {
        printf(" %15.8e", ten[i][1]);
    }
    printf("\n and they were found on processor number\n");
    for (i = MM-1; i >= 0; i--) {
        printf(" %4d", jg[0][i][1]);
    }
    printf("\n");*/

    /* Clear z, then drop the +/-1 charges at the selected extrema. */
#pragma omp parallel for private(i2, i1)
    for (i3 = 0; i3 < n3; i3++) {
        for (i2 = 0; i2 < n2; i2++) {
            for (i1 = 0; i1 < n1; i1++) {
                z[i3][i2][i1] = 0.0;
            }
        }
    }
    for (i = MM-1; i >= m0; i--) {
        z[j3[i][0]][j2[i][0]][j1[i][0]] = -1.0;
    }
    for (i = MM-1; i >= m1; i--) {
        z[j3[i][1]][j2[i][1]][j1[i][1]] = 1.0;
    }
    comm3(z,n1,n2,n3,k);

/*--------------------------------------------------------------------
c          call showall(z,n1,n2,n3)
c-------------------------------------------------------------------*/
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void showall(double ***z, int n1, int n2, int n3) {
/*--------------------------------------------------------------------
c  Debug dump: prints (at most) an 18 x 14 x 18 corner of z, one
c  i3-plane at a time, with columns indexed by i2 and rows by i1.
c-------------------------------------------------------------------*/
    int i1, i2, i3;
    const int m1 = min(n1,18);
    const int m2 = min(n2,14);
    const int m3 = min(n3,18);

    printf("\n");
    for (i3 = 0; i3 < m3; i3++) {
        for (i1 = 0; i1 < m1; i1++) {
            for (i2 = 0; i2 < m2; i2++)
                printf("%6.3f", z[i3][i2][i1]);
            printf("\n");
        }
        printf(" - - - - - - - \n");
    }
    printf("\n");
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static double power( double a, int n ) {
/*--------------------------------------------------------------------
c  power raises an integer, disguised as a double precision real, to
c  an integer power.  Exponentiation by squaring, where each multiply
c  is performed modularly by randlc (which also returns the next
c  pseudorandom value -- discarded here).
c-------------------------------------------------------------------*/
    double base = a;      /* successively squared via randlc */
    double result = 1.0;  /* accumulates the product */
    int remaining = n;
    double rdummy;

    while (remaining != 0) {
        if ((remaining % 2) == 1) rdummy = randlc( &result, base );
        rdummy = randlc( &base, base );
        remaining = remaining/2;
    }
    return (result);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void bubble( double ten[M][2], int j1[M][2], int j2[M][2],
                    int j3[M][2], int m, int ind ) {
/*--------------------------------------------------------------------
c  bubble does a single bubble-sort pass in direction ind: ascending
c  for ind == 1 (largest values migrate toward slot m-1), descending
c  otherwise.  The three index arrays are swapped in lockstep with the
c  values.  The pass stops at the first pair already in order, which
c  is sufficient because only slot 0 is ever out of place.
c-------------------------------------------------------------------*/
    double val_tmp;
    int idx_tmp, i;

    for (i = 0; i < m-1; i++) {
        /* ind selects the comparison direction; same test as the
           original's duplicated ascending/descending branches */
        const int out_of_order = (ind == 1)
            ? (ten[i][ind] > ten[i+1][ind])
            : (ten[i][ind] < ten[i+1][ind]);
        if (!out_of_order)
            return;

        val_tmp = ten[i+1][ind];
        ten[i+1][ind] = ten[i][ind];
        ten[i][ind] = val_tmp;

        idx_tmp = j1[i+1][ind];
        j1[i+1][ind] = j1[i][ind];
        j1[i][ind] = idx_tmp;

        idx_tmp = j2[i+1][ind];
        j2[i+1][ind] = j2[i][ind];
        j2[i][ind] = idx_tmp;

        idx_tmp = j3[i+1][ind];
        j3[i+1][ind] = j3[i][ind];
        j3[i][ind] = idx_tmp;
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void zero3(double ***z, int n1, int n2, int n3) {
/*--------------------------------------------------------------------
c  Sets every element of the n1 x n2 x n3 array z to zero; the
c  outermost (i3) loop is workshared across OpenMP threads.
c-------------------------------------------------------------------*/
    int i1, i2, i3;
#pragma omp parallel for private(i1,i2,i3)
    for (i3 = 0; i3 < n3; i3++) {
        for (i2 = 0; i2 < n2; i2++) {
            double *row = z[i3][i2];  /* innermost row, contiguous in i1 */
            for (i1 = 0; i1 < n1; i1++)
                row[i1] = 0.0;
        }
    }
}
/*---- end of program ------------------------------------------------*/
|
contact_residualbased_elimination_builder_and_solver_with_constraints.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
//
#if !defined(KRATOS_CONTACT_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS )
#define KRATOS_CONTACT_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS
/* System includes */
#include <unordered_set>
#include <unordered_map>
/* External includes */
/* Project includes */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_with_constraints.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ContactResidualBasedEliminationBuilderAndSolverWithConstraints
* @ingroup ContactStructuralMechanicsApplication
* @brief Current class provides an implementation for contact builder and solving operations. (elimination)
* @details The RHS is constituted by the unbalanced loads (residual). Degrees of freedom are reordered putting the restrained degrees of freedom at the end of the system ordered in reverse order with respect to the DofSet and not considered the inactive ones. Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information. Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Vicente Mataix Ferrandiz
* @tparam TSparseSpace The sparse matrix system considered
* @tparam TDenseSpace The dense matrix system
* @tparam TLinearSolver The type of linear solver considered
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ContactResidualBasedEliminationBuilderAndSolverWithConstraints
: public ResidualBasedEliminationBuilderAndSolverWithConstraints< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ContactResidualBasedEliminationBuilderAndSolverWithConstraints
KRATOS_CLASS_POINTER_DEFINITION(ContactResidualBasedEliminationBuilderAndSolverWithConstraints);
/// Builder and solver base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseBuilderAndSolverType;
/// Definitions dependent of the base class
typedef ResidualBasedEliminationBuilderAndSolverWithConstraints< TSparseSpace, TDenseSpace, TLinearSolver > BaseType;
/// The definition of the current class
typedef ContactResidualBasedEliminationBuilderAndSolverWithConstraints<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
/// Base types definitions
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodeType NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// General containers type definitions
typedef ModelPart::MasterSlaveConstraintContainerType ConstraintContainerType;
/// Additional definitions
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef typename BaseType::EquationIdVectorType EquationIdVectorType;
typedef typename BaseType::DofsVectorType DofsVectorType;
/// DoF types definition
typedef typename BaseType::DofType DofType;
typedef typename BaseType::DofPointerType DofPointerType;
/// The DoF pointer vector type definition
typedef std::vector<typename DofType::Pointer> DofPointerVectorType;
/// The size type
typedef std::size_t SizeType;
/// The index type
typedef std::size_t IndexType;
/// Index set definition
typedef std::unordered_set<IndexType> IndexSetType;
///@}
///@name Enum's
///@{
///@}
///@name Life Cycle
///@{
/**
 * @brief Default constructor
 */
explicit ContactResidualBasedEliminationBuilderAndSolverWithConstraints() : BaseType()
{
}

/**
 * @brief Default constructor. (with parameters)
 * @param pNewLinearSystemSolver The linear solver used for the system of equations
 * @param ThisParameters The configuration parameters; validated against GetDefaultParameters() before assignment
 */
explicit ContactResidualBasedEliminationBuilderAndSolverWithConstraints(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) : BaseType(pNewLinearSystemSolver)
{
    // Validate and assign defaults
    ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
    this->AssignSettings(ThisParameters);
}

/** Constructor.
 * @param pNewLinearSystemSolver The linear solver used for the system of equations
 */
ContactResidualBasedEliminationBuilderAndSolverWithConstraints(
    typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : BaseType(pNewLinearSystemSolver)
{
}

/** Destructor.
 */
~ContactResidualBasedEliminationBuilderAndSolverWithConstraints() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Create method
* @param pNewLinearSystemSolver The linear solver for the system of equations
* @param ThisParameters The configuration parameters
*/
typename BaseBuilderAndSolverType::Pointer Create(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) const override
{
    // Factory method: returns a freshly constructed instance of this class
    // wrapped in the base-class pointer type (virtual-constructor idiom)
    return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters);
}
/**
* @brief It organises the dofset in order to speed up the building phase
* @param rModelPart The model part to compute
*/
void SetUpSystem(
    ModelPart& rModelPart
    ) override
{
    // Dispatch: use the constraint-aware setup only when master/slave
    // constraints are present, otherwise the plain (base) setup.
    if (rModelPart.MasterSlaveConstraints().size() == 0) {
        BaseSetUpSystem(rModelPart);
    } else {
        SetUpSystemWithConstraints(rModelPart);
    }
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart
    ) override
{
    // Dispatch: use the constraint-aware DoF setup only when master/slave
    // constraints are present, otherwise fall back to the base implementation.
    if (rModelPart.MasterSlaveConstraints().size() == 0) {
        BaseType::SetUpDofSet(pScheme, rModelPart);
    } else {
        SetUpDofSetWithConstraints(pScheme, rModelPart);
    }
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
    // Defaults specific to this class; base-class defaults are merged in below
    Parameters default_parameters = Parameters(R"(
    {
        "name" : "contact_residual_elimination_builder_and_solver_with_constraints"
    })");

    // Getting base class default parameters (existing entries keep precedence)
    const Parameters base_default_parameters = BaseType::GetDefaultParameters();
    default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
    return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
    // Registry/settings identifier of this builder-and-solver (snake_case);
    // must match the "name" entry in GetDefaultParameters()
    return "contact_residual_elimination_builder_and_solver_with_constraints";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element and condition its Dofs.
* @details Equivalent to the ResidualBasedEliminationBuilderAndSolver but with constraints. The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSetWithConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY;

    // We are going to enforce the existence of constraints for LM for each displacement dof:
    // every displacement master/slave constraint is mirrored by an equivalent
    // constraint on the corresponding Lagrange-multiplier components.
    if (rModelPart.NodesBegin()->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER)) {
        // Reorder constraints so ids are consecutive starting at 1; after this
        // loop constraint_id == (number of existing constraints) + 1
        IndexType constraint_id = 1;
        for (auto& constrain : rModelPart.MasterSlaveConstraints()) {
            constrain.SetId(constraint_id);
            ++constraint_id;
        }

        // Auxiliar dofs lists
        DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations

        // Contributions to the system
        LocalSystemMatrixType transformation_matrix = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType constant_vector = LocalSystemVectorType(0);

        // Reference constraint used as a prototype for the new LM constraints
        const auto& r_clone_constraint = KratosComponents<MasterSlaveConstraint>::Get("LinearMasterSlaveConstraint");

        #pragma omp parallel firstprivate(transformation_matrix, constant_vector, dof_list, second_dof_list)
        {
            // Current process info
            ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

            // A buffer to store auxiliar constraints (thread-local; merged
            // into the model part under the critical section below)
            ConstraintContainerType constraints_buffer;

            // Gets the array of constraints from the modeler
            auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
            const int number_of_constraints = static_cast<int>(r_constraints_array.size());
            #pragma omp for schedule(guided, 512)
            for (int i = 0; i < number_of_constraints; ++i) {
                auto it_const = r_constraints_array.begin() + i;

                // Gets list of Dof involved on every element
                it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
                it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);

                DofPointerVectorType slave_dofs, master_dofs;
                bool create_lm_constraint = false;

                // We check if we have SLAVE nodes in the master dofs
                bool slave_nodes_master_dof = false;
                // Master DoFs
                for (auto& p_dof : second_dof_list) {
                    if (IsDisplacementDof(*p_dof)) {
                        const IndexType node_id = p_dof->Id();
                        auto pnode = rModelPart.pGetNode(node_id);
                        if (pnode->Is(SLAVE)) { // The nodes computing contact are the slave nodes
                            slave_nodes_master_dof = true;
                            break;
                        }
                    }
                }

                // Slave DoFs: mirror each displacement component with the
                // matching LM component of the same node
                for (auto& p_dof : dof_list) {
                    if (IsDisplacementDof(*p_dof)) {
                        const IndexType node_id = p_dof->Id();
                        const auto& r_variable = p_dof->GetVariable();
                        auto pnode = rModelPart.pGetNode(node_id);
                        if (pnode->IsNot(INTERFACE) || slave_nodes_master_dof) { // Nodes from the contact interface cannot be slave DoFs
                            if (r_variable == DISPLACEMENT_X) {
                                slave_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_X));
                            } else if (r_variable == DISPLACEMENT_Y) {
                                slave_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Y));
                            } else if (r_variable == DISPLACEMENT_Z) {
                                slave_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Z));
                            }
                        } else { // We remove it (flagged; erased after the parallel region)
                            it_const->Set(TO_ERASE);
                        }
                    }
                }

                // Master DoFs
                if (slave_nodes_master_dof) { // The nodes computing contact are the slave nodes
                    for (auto& p_dof : second_dof_list) {
                        if (IsDisplacementDof(*p_dof)) {
                            const IndexType node_id = p_dof->Id();
                            const auto& r_variable = p_dof->GetVariable();
                            auto pnode = rModelPart.pGetNode(node_id);
                            if (r_variable == DISPLACEMENT_X) {
                                master_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_X));
                            } else if (r_variable == DISPLACEMENT_Y) {
                                master_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Y));
                            } else if (r_variable == DISPLACEMENT_Z) {
                                master_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Z));
                            }
                        }
                    }
                }

                // We check if we create constraints: only when every
                // displacement DoF found its LM counterpart
                if ((slave_dofs.size() == dof_list.size()) &&
                    (master_dofs.size() == second_dof_list.size())) {
                    create_lm_constraint = true;
                }

                // We create the new constraint.  The id "constraint_id + i + 1"
                // starts past the last reordered id, so new ids cannot clash
                // with existing constraints or with other loop iterations.
                if (create_lm_constraint) {
                    auto p_constraint = r_clone_constraint.Create(constraint_id + i + 1, master_dofs, slave_dofs, transformation_matrix, constant_vector);
                    (constraints_buffer).insert((constraints_buffer).begin(), p_constraint);
                }
            }

            // We transfer the thread-local buffer into the model part, one
            // thread at a time (AddMasterSlaveConstraints is not thread-safe)
            #pragma omp critical
            {
                rModelPart.AddMasterSlaveConstraints(constraints_buffer.begin(),constraints_buffer.end());
            }
        }
    }

    // We remove the marked constraints
    rModelPart.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE);
    KRATOS_INFO_IF("ContactResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() > 0)) <<
        "Model part after creating new constraints" << rModelPart << std::endl;

    // Calling base SetUpDofSetWithConstraints
    BaseType::SetUpDofSetWithConstraints(pScheme, rModelPart);

    KRATOS_CATCH("");
}
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
    // No settings of its own: forward everything to the base class
    BaseType::AssignSettings(ThisParameters);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method computes the equivalent coounter part of the SetUpSystem when using constraints
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystemWithConstraints(ModelPart& rModelPart)
{
    KRATOS_TRY

    // First we set up the system of equations without constraints
    BaseSetUpSystem(rModelPart);

    // Count the solvable dofs: dofs inside the equation system that are
    // not registered as slaves of a master/slave constraint
    IndexType solvable_dofs = 0;
    for (auto& r_dof : BaseType::mDofSet) {
        if (r_dof.EquationId() >= BaseType::mEquationSystemSize)
            continue; // restrained dof, outside the solved system
        if (BaseType::mDoFSlaveSet.find(r_dof) == BaseType::mDoFSlaveSet.end())
            ++solvable_dofs;
    }

    // The total system of equations to be solved
    BaseType::mDoFToSolveSystemSize = solvable_dofs;

    KRATOS_CATCH("ContactResidualBasedEliminationBuilderAndSolverWithConstraints::FormulateGlobalMasterSlaveRelations failed ..");
}
/**
* @brief It organises the dofset in order to speed up the building phase (base one)
* @param rModelPart The model part to compute
*/
void BaseSetUpSystem(ModelPart& rModelPart)
{
    /**
     * Idem to the not contact version, except that if we fix the displacement in one slave node we should fix the corresponding LM for consistency
     */

    // We create a set of dofs of the displacement slave dofs with LM associated.
    // Key: node id; value: set of variable keys of the LM components that must
    // be fixed on that node (filled in the second pass below).
    std::unordered_map<IndexType, IndexSetType> set_nodes_with_lm_associated;
    if (rModelPart.HasSubModelPart("Contact"))
        set_nodes_with_lm_associated.reserve(rModelPart.GetSubModelPart("Contact").NumberOfNodes());

    // Allocating auxiliar parameters
    IndexType node_id;

    // First pass: register every node that owns at least one LM dof
    for (auto& i_dof : BaseType::mDofSet) {
        node_id = i_dof.Id();
        if (IsLMDof(i_dof))
            set_nodes_with_lm_associated.insert({node_id, IndexSetType({})});
    }

    // Auxiliar keys of the LM components
    const IndexType key_lm_x = VECTOR_LAGRANGE_MULTIPLIER_X.Key();
    const IndexType key_lm_y = VECTOR_LAGRANGE_MULTIPLIER_Y.Key();
    const IndexType key_lm_z = VECTOR_LAGRANGE_MULTIPLIER_Z.Key();

    // Second pass: for each FIXED displacement component on an LM node,
    // record the key of the LM component that must be fixed too
    for (auto& i_dof : BaseType::mDofSet) {
        node_id = i_dof.Id();
        auto it = set_nodes_with_lm_associated.find(node_id);
        if ( it != set_nodes_with_lm_associated.end()) {
            if (i_dof.IsFixed()) {
                const auto& r_variable = i_dof.GetVariable();
                auto& aux_set = (it->second);
                if (r_variable == DISPLACEMENT_X) {
                    aux_set.insert(key_lm_x);
                } else if (r_variable == DISPLACEMENT_Y) {
                    aux_set.insert(key_lm_y);
                } else if (r_variable == DISPLACEMENT_Z) {
                    aux_set.insert(key_lm_z);
                }
            }
        }
    }

    // Third pass: fix every still-free LM dof whose displacement
    // counterpart was found fixed above
    for (auto& i_dof : BaseType::mDofSet) {
        if (i_dof.IsFree()) {
            node_id = i_dof.Id();
            auto it = set_nodes_with_lm_associated.find(node_id);
            if (it != set_nodes_with_lm_associated.end()) {
                auto& aux_set = it->second;
                if (aux_set.find((i_dof.GetVariable()).Key()) != aux_set.end()) {
                    i_dof.FixDof();
                }
            }
        }
    }

    // Delegate the actual equation-id assignment to the base class
    BaseType::SetUpSystem(rModelPart);
}
/**
* @brief Checks if the degree of freedom belongs to a displacement DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a displacement dof
*/
static inline bool IsDisplacementDof(const DofType& rDoF)
{
    // A displacement DoF is any of the three cartesian displacement
    // components; return the predicate directly instead of the verbose
    // if-return-true/return-false form
    const auto& r_variable = rDoF.GetVariable();
    return r_variable == DISPLACEMENT_X ||
           r_variable == DISPLACEMENT_Y ||
           r_variable == DISPLACEMENT_Z;
}
/**
* @brief Checks if the degree of freedom belongs to a LM DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a LM dof
*/
static inline bool IsLMDof(const DofType& rDoF)
{
    // A Lagrange-multiplier DoF is any of the three LM vector components;
    // return the predicate directly instead of the verbose
    // if-return-true/return-false form
    const auto& r_variable = rDoF.GetVariable();
    return r_variable == VECTOR_LAGRANGE_MULTIPLIER_X ||
           r_variable == VECTOR_LAGRANGE_MULTIPLIER_Y ||
           r_variable == VECTOR_LAGRANGE_MULTIPLIER_Z;
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ContactResidualBasedEliminationBuilderAndSolverWithConstraints */
///@}
///@name Type Definitions */
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_CONTACT_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS defined */
|
omptest-ori.c | #include <nautilus/nautilus.h>
#include <nautilus/shell.h>
#include <nautilus/libccompat.h>
#include <nautilus/random.h>
//#include <nautilus/scheduler.h>
#ifndef NAUT_CONFIG_DEBUG_GPUDEV
#undef DEBUG_PRINT
#define DEBUG_PRINT(fmt, args...)
#endif
#define ERROR(fmt, args...) ERROR_PRINT("omptest: " fmt, ##args)
#define DEBUG(fmt, args...) DEBUG_PRINT("omptest: " fmt, ##args)
#define INFO(fmt, args...) INFO_PRINT("omptest: " fmt, ##args)
/* Return 16 random bits from the Nautilus kernel RNG.
 * NOTE(review): this shadows libc random() from libccompat -- presumably
 * intentional so all randomness comes from nk_get_rand_bytes, but the
 * name is easy to misread; confirm no caller expects the libc version. */
static inline uint16_t random()
{
    uint16_t t;
    nk_get_rand_bytes((uint8_t *)&t,sizeof(t));
    return t;
}
#define MAXN 5100 /* Max value of N */
int N; /* Matrix size */
int procs; /* Number of processors to use */
/* Matrices and vectors */
volatile float A[MAXN][MAXN], B[MAXN], X[MAXN];
volatile float ORA[MAXN][MAXN], ORB[MAXN], ORX[MAXN];
/* A * X = B, solve for X */
int seed;
/* Prototype */
void gauss(); /* The function you will provide.
* It is this routine that is timed.
* It is called only on the parent.
*/
/* Initialize the reference matrices ORA and ORB with pseudorandom values.
 * The working copies A/B (and X, zeroed) are populated from these by
 * reset_inputs(), so each solver run starts from identical data. */
void initialize_inputs() {
    int row, col;

    printf("\nInitializing...\n");
    //  #pragma omp parallel num_threads(8)
    {
        //  #pragma omp for private(row,col) schedule(static,1) nowait
        /* NOTE(review): entries are written as ORA[col][row] here but read
         * back as ORA[row][col] in reset_inputs().  Since every entry is
         * random this merely transposes the matrix -- confirm intentional.
         * random() returns a uint16_t (0..65535), so dividing by 32768.0
         * yields values in [0, 2), not [0, 1). */
        for (col = 0; col < N; col++) {
            for (row = 0; row < N; row++) {
                ORA[col][row] = (float) random()/32768.0;
            }
            ORB[col] = (float)random()/32768.0;
        }
    }
}
/* Restore the working system A/B from the saved originals ORA/ORB and
 * clear the solution vector X, so a fresh solve starts from known data. */
void reset_inputs(){
    int i, j;

    printf("\n reseting...\n");
    for (j = 0; j < N; j++) {
        for (i = 0; i < N; i++)
            A[i][j] = ORA[i][j];
        B[j] = ORB[j];
        X[j] = 0.0;
    }
}
/* Print input matrices */
/* Print input matrices (skipped entirely for large systems). */
void print_inputs() {
    int r, c;

    if (N >= 1000)
        return;  /* too large to dump legibly */

    printf("\nA =\n\t");
    for (r = 0; r < N; r++)
        for (c = 0; c < N; c++)
            printf("%5.2f%s", A[r][c], (c < N-1) ? ", " : ";\n\t");

    printf("\nB = [");
    for (c = 0; c < N; c++)
        printf("%5.2f%s", B[c], (c < N-1) ? "; " : "]\n");
}
/* Reference solver: sequential Gaussian elimination with back
 * substitution, operating on the globals A, B, X of size N. */
void serialgauss(){
    int norm, r, c;
    float mult;

    printf("Computing serially.\n");

    /* Forward elimination: for each normalization row, zero out the
     * entries below the diagonal in column `norm`. */
    for (norm = 0; norm < N - 1; norm++) {
        for (r = norm + 1; r < N; r++) {
            mult = A[r][norm] / A[norm][norm];
            for (c = norm; c < N; c++)
                A[r][c] -= A[norm][c] * mult;
            B[r] -= B[norm] * mult;
        }
    }
    /* (Diagonal elements are not normalized to 1; handled during back
     * substitution instead.) */

    /* Back substitution on the resulting upper-triangular system. */
    for (r = N - 1; r >= 0; r--) {
        X[r] = B[r];
        for (c = N-1; c > r; c--)
            X[r] -= A[r][c] * X[c];
        X[r] /= A[r][r];
    }
}
/* OpenMP solver: Gaussian elimination with the row updates of each
 * normalization step workshared across `procs` threads; back
 * substitution remains serial. */
void ompgauss() {
    int norm, row, col;     /* Normalization row, and zeroing
                             * element row and col */
    float multiplier;
    //doneflag[0] = 1;
    printf("Computing using omp.\n");

    /* Gaussian elimination.  The whole elimination runs inside ONE
     * parallel region: every thread executes the (private) norm loop,
     * and the "omp for" splits the rows of each step among threads.
     * The implicit barrier at the end of each "omp for" keeps the norm
     * iterations in lockstep, which makes reading row `norm` while
     * updating rows norm+1..N-1 race-free. */
#pragma omp parallel private(row, col, multiplier, norm) num_threads(procs)
    {
        for (norm = 0; norm < N - 1; norm++) {
#pragma omp for schedule(static,1)
            for (row = norm + 1; row < N; row++) {
                multiplier = A[row][norm]/A[norm][norm];
                for (col = norm; col < N; col++) {
                    A[row][col] -= A[norm][col] * multiplier;
                }
                B[row] -= B[norm] * multiplier;
            }
        }
    }
    nk_vc_printf("I am done\n");
    /* (Diagonal elements are not normalized to 1. This is treated in back
     * substitution.)
     */

    /* Back substitution (serial). */
    for (row = N - 1; row >= 0; row--) {
        X[row] = B[row];
        for (col = N-1; col > row; col--) {
            X[row] -= A[row][col] * X[col];
        }
        X[row] /= A[row][row];
    }
}
#define TIME() (double)nk_sched_get_realtime();
/* Shell command handler: "omptest <seed> <size> <nprocs>".
 * Solves the same random system with ompgauss() and serialgauss(),
 * reports timings, speedup and the accumulated solution difference.
 * Returns 0 on success, -1 on a parse error. */
static int handle_omptest (char * buf, void * priv)
{
    int seed, size, np;

    if ((sscanf(buf,"omptest %d %d %d",&seed,&size,&np)!=3)) {
        nk_vc_printf("Don't understand %s please input seed, matrix size and nprocs\n",buf);
        return -1;
    }

    nk_rand_seed(seed);
    N = size;
    procs = np;
    nk_vc_printf("seed %d, size, %d, nprocs: %d\n", seed, N, procs);

    initialize_inputs();
    reset_inputs();
    //  print_inputs();

    /* Toggle the zero-divide mask bit (0x0200) in MXCSR.
     * BUGFIX: stmxcsr STORES the current MXCSR to memory and ldmxcsr
     * LOADS it from memory.  The original code executed ldmxcsr first,
     * loading an UNINITIALIZED stack value into MXCSR before reading it
     * back.  Correct order: read (stmxcsr), modify, write (ldmxcsr),
     * re-read to verify. */
    unsigned mxcsr;
    __asm__ volatile("stmxcsr %0":"=m"(mxcsr)::"memory");
    printf("ld %04x \n", mxcsr);
    mxcsr = mxcsr ^ 0x0200;
    printf("st %08x \n", mxcsr);
    __asm__ volatile("ldmxcsr %0"::"m"(mxcsr):"memory");
    __asm__ volatile("stmxcsr %0":"=m"(mxcsr)::"memory");
    printf("ld %08x \n", mxcsr);

    double start = TIME();
    ompgauss();
    double end = TIME();
    double omp = end-start;
    nk_vc_printf("openmp done %lf\n", omp);

    /* Keep a copy of the OpenMP solution before the inputs are reset */
    float OMP[N];
    for(int row =0; row<N; row++){
        OMP[row] = X[row];
    }

    reset_inputs();

    start = TIME();
    serialgauss();
    end = TIME();
    double serial = end-start;
    nk_vc_printf("serial done %lf\n", serial);

    /* Accumulate the ABSOLUTE elementwise difference between the two
     * solutions.  BUGFIX: the original summed signed differences, which
     * can cancel and hide real disagreement between the solvers. */
    float difference = 0.0;
    for(int row =0; row<N; row++){
        float d = OMP[row] - X[row];
        difference += (d < 0.0f) ? -d : d;
    }
    nk_vc_printf("OMP difference %f speed up %f !\n", difference, serial/omp);

    return 0;
}
static struct shell_cmd_impl omptest_impl = {
.cmd = "omptest",
.help_str = "openmp test",
.handler = handle_omptest,
};
nk_register_shell_cmd(omptest_impl);
|
fastmap.h | #ifndef FASTMAP_FASTMAP_
#define FASTMAP_FASTMAP_
#include "safeomp.h"
/* Draw a uniformly distributed integer in the inclusive range [min, max].
 * NOTE(review): relies on unif_rand() (R's RNG) returning a double in
 * [0, 1); if it could ever return exactly 1.0 this would yield max+1 --
 * confirm the RNG contract. */
static inline int sample(const int min, const int max)
{
    return min + (int) ((double)(max-min + 1) * unif_rand());
}
/* (value, index) pair used as the accumulator of the "maximum"
 * OpenMP user-defined reduction declared below. */
struct CustomMax
{
    double value;  /* largest distance seen so far */
    int index;     /* row index attaining that distance */
};
#ifdef OMP_VER_4
#pragma omp declare reduction(maximum : struct CustomMax : omp_out = (omp_in.value>omp_out.value ? omp_in : omp_out))
#endif
/* Find the row of the m-by-n column-major matrix x that is farthest
 * from the point b (distance computed via dnrm3 on b - x[i,:]) and copy
 * that row into a.  work must provide at least n doubles per OpenMP
 * thread (indexed by tid*n). */
static inline void find_most_distant(const int m, const int n, const double *const restrict x, double *restrict a, double *restrict b, double *restrict work)
{
    struct CustomMax max;
    max.value = 0.0;
    max.index = 0;

    /* Without OMP_VER_4 this loop runs serially and tid is always 0. */
    #ifdef OMP_VER_4
    #pragma omp parallel for default(shared) reduction(maximum:max) if(m*n>OMP_MIN_SIZE)
    #endif
    for (int i=0; i<m; i++)
    {
        const int tid = omp_get_thread_num();
        /* work[tid-slice] = b - x[i,:]; rows are strided by m because x
           is stored column-major */
        SAFE_SIMD
        for (int j=0; j<n; j++)
            work[j + tid*n] = -x[i + m*j];
        daxpy_(&n, &(double){1.0}, b, &(int){1}, work + tid*n, &(int){1});
        /* NOTE(review): dnrm3 is a project helper, not BLAS dnrm2;
           presumably an n-vector norm with the trailing 2 selecting the
           p=2 norm -- confirm against its definition in safeomp.h. */
        double tmp = dnrm3(n, work + tid*n, 2);
        if (tmp > max.value)
        {
            max.value = tmp;
            max.index = i;
        }
    }

    /* a = x[max.index, :] */
    #ifdef OMP_VER_4
    #pragma omp parallel for simd if(n>OMP_MIN_SIZE)
    #else
    #pragma omp parallel for if(n>OMP_MIN_SIZE)
    #endif
    for (int j=0; j<n; j++)
        a[j] = x[max.index + m*j];
}
/* One FastMap pivot-selection step: pick a random row of x as b, move a
 * to the row farthest from b, then move b to the row farthest from a.
 * x is m-by-n column-major; work is the scratch buffer required by
 * find_most_distant. */
static inline void fastmap(const int m, const int n, const double *const restrict x, double *const restrict a, double *const restrict b, double *restrict work)
{
    /* Seed b with a uniformly random row of x */
    const int pivot = sample(0, m-1);
    for (int j=0; j<n; j++)
        b[j] = x[pivot + m*j];

    /* a = most distant point in x from b */
    find_most_distant(m, n, x, a, b, work);
    /* b = most distant point in x from a */
    find_most_distant(m, n, x, b, a, work);
}
#endif
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image bluring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Prehaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Iterative integer factorial: fact(0) == fact(1) == 1.
** Overflows size_t for large n (n >= 21 on 64-bit), so callers keep
** the Binomial kernel radius small.
*/
static inline size_t fact(size_t n)
{
  size_t
    product,
    term;

  product=1;
  for (term=2; term <= n; term++)
    product*=term;
  return(product);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Return the last kernel in a singly-linked kernel list (never NULL
** for a non-NULL input; the caller passes a valid list head).
*/
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *tail;

  for (tail=kernel; tail->next != (KernelInfo *) NULL; tail=tail->next) ;
  return(tail);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
**
** Parses either a "WxH[+X+Y][@><]:num,num,..." user kernel, or an
** 'old style' plain list of numbers forming an odd-sized square
** kernel, into a freshly allocated KernelInfo.
**
** Returns NULL on allocation failure or on a malformed definition.
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) memset(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature=MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
   */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* FIX: bounds check -- the geometry prefix is user supplied and
         must fit (with its terminating nul) in the fixed-size token
         buffer, otherwise the memcpy below overflows the stack. */
      if ((size_t) (p-kernel_string) >= MagickPathExtent)
        return(DestroyKernelInfo(kernel));
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then  width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    (void) GetNextToken(p,&p,MagickPathExtent,token);
    if (*token == ',')
      (void) GetNextToken(p,&p,MagickPathExtent,token);
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      /* accumulate the split positive/negative ranges, and min/max */
      ( kernel->values[i] < 0)
          ?  ( kernel->negative_range += kernel->values[i] )
          :  ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  (void) GetNextToken(p,&p,MagickPathExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we received at least one real (non-nan) value! */
  if (kernel->minimum == MagickMaximumValue)
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */

  return(kernel);
}
/* ParseKernelName() parses a 'named' kernel specification of the form
** "name[:args]" into a KernelInfo via AcquireKernelBuiltIn(), applying
** per-kernel defaults for missing geometry values, and expanding any
** '@', '>' or '<' rotation flags into a kernel list.
**
** Returns NULL if the name is not a valid built-in kernel or the
** definition is malformed.
*/
static KernelInfo *ParseKernelName(const char *kernel_string,
  ExceptionInfo *exception)
{
  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  (void) GetNextToken(kernel_string,&p,MagickPathExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */

  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* FIX: bounds check -- the user-supplied argument string must fit
     (with its terminating nul) in the fixed-size token buffer,
     otherwise the memcpy below overflows the stack. */
  if ((size_t) (end-p) >= MagickPathExtent)
    return((KernelInfo *) NULL);
  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;       /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:    /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 3;                  /* then  width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}
/* AcquireKernelInfo() -- see the documentation block above.  Splits the
** (possibly '@file'-sourced) definition string on ';' and builds a
** linked list of kernels via ParseKernelName()/ParseKernelArray().
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
  ExceptionInfo *exception)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    *kernel_cache,
    token[MagickPathExtent];

  const char
    *p;

  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@filename' : read the kernel definition(s) from a file */
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  /* peek at the next token without advancing p; stop at end of string */
  while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p,exception);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        /* Error handling -- this is not proper error handling! */
        if (new_kernel == (KernelInfo *) NULL)
          {
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            /* FIX: release the file cache too -- it was leaked on this
               error path */
            if (kernel_cache != (char *) NULL)
              kernel_cache=DestroyString(kernel_cache);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernalBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alturnative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleving of Manhatten and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without loosing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
       * How to do this I don't know, but it appears to be based on the
       * Error Function 'erf()' (integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
      ** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
         This is equivalent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
      ** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
      ** It is less complex
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
          /* Unfortunately we cannot yet rotate a non-square kernel */
          /* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
            ** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
      ** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  KernelInfo
    *clone;

  register ssize_t
    n;

  assert(kernel != (KernelInfo *) NULL);
  clone=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (clone == (KernelInfo *) NULL)
    return(clone);
  /* shallow copy of every scalar field of the structure */
  *clone=(*kernel);
  /* deep copy: give the clone its own aligned copy of the value array */
  clone->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (clone->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(clone));
  for (n=0; n < (ssize_t) (kernel->width*kernel->height); n++)
    clone->values[n]=kernel->values[n];
  /* recursively clone the remainder of the kernel list */
  if (kernel->next != (KernelInfo *) NULL)
    {
      clone->next=CloneKernelInfo(kernel->next);
      if (clone->next == (KernelInfo *) NULL)
        return(DestroyKernelInfo(clone));
    }
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  /*
    Release the whole kernel list iteratively: for each node, free its
    aligned value array, then the node itself, and move on to the next.
    Always returns NULL so callers can write: kernel=DestroyKernelInfo(kernel);
  */
  assert(kernel != (KernelInfo *) NULL);
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;
    kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
    kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180-degree
% rotation before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/*
  Disabled, unfinished helper: reverse each row of the kernel values
  in-place (a horizontal reflection, or "flop"), and mirror the origin's
  x offset accordingly.
  NOTE(review): the final statement references 'angle', which is not
  declared anywhere in this function, so this block would not compile if
  the surrounding "#if 0" were removed — presumably it was meant to track
  the kernel's accumulated rotation angle in a caller; confirm before
  re-enabling.
*/
static void FlopKernelInfo(KernelInfo *kernel)
    { /* Do a Flop by reversing each row. */
      size_t
        y;
      register ssize_t
        x,r;
      register double
        *k,t;

      /* swap values pairwise from the two ends of each row */
      for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
        for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
          t=k[x], k[x]=k[r], k[r]=t;

      /* mirror the origin offset across the row */
      kernel->x = kernel->width - kernel->x - 1;
      angle = fmod(angle+180.0, 360.0);
    }
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /*
    Append three successively rotated copies of the list tail to the given
    kernel: a 180-degree "flip", then a 90-degree "transpose" of that, then
    a final 180-degree "flop".  This specific ordering produces the more
    symmetrical thinning behaviour documented above.  On any allocation
    failure the list is simply left partially expanded.
  */
  static const double
    angles[3] = { 180.0, 90.0, 180.0 };

  KernelInfo
    *copy,
    *tail;

  ssize_t
    n;

  tail=kernel;
  for (n=0; n < 3; n++)
  {
    copy=CloneKernelInfo(tail);
    if (copy == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(copy,angles[n]);
    LastKernelInfo(tail)->next=copy;
    tail=copy;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45-degree rotations only work for 3x3 kernels,
% while 90-degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal routine - return MagickTrue when two kernels are equivalent */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    n;

  /* geometry and origin location must match exactly */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return MagickFalse;
  /* compare each value; a NaN ("don't care") only matches another NaN */
  for (n=0; n < (kernel1->width*kernel1->height); n++)
  {
    if ((IsNaN(kernel1->values[n]) && !IsNaN(kernel2->values[n])) ||
        (IsNaN(kernel2->values[n]) && !IsNaN(kernel1->values[n])))
      return MagickFalse;
    /* values are equivalent when within epsilon of each other */
    if (fabs(kernel1->values[n]-kernel2->values[n]) >= MagickEpsilon)
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  /*
    Keep appending incrementally rotated clones of the list tail until the
    rotation wraps around and produces a kernel identical to the original.
    The final (repeated) clone is discarded.  A for(;;) loop is used so no
    constant-conditional warning suppression is required.
  */
  rotated=(KernelInfo *) NULL;
  tail=kernel;
  for ( ; ; )
  {
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      break;  /* allocation failure - stop expanding */
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* full circle - this clone is a repeat */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  if (rotated != (KernelInfo *) NULL)
    rotated=DestroyKernelInfo(rotated);  /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+     C a l c K e r n e l M e t a D a t a                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  /*
    Reset the kernel meta-data, then accumulate it from the stored values.
    Because minimum/maximum start at zero they always include zero in the
    reported range, even when zero is not actually present in the kernel
    (see the WARNING in the documentation above).
  */
  kernel->minimum=0.0;
  kernel->maximum=0.0;
  kernel->negative_range=0.0;
  kernel->positive_range=0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    /* flush near-zero values to exactly zero before accumulating */
    if ( fabs(kernel->values[n]) < MagickEpsilon )
      kernel->values[n] = 0.0;
    if (kernel->values[n] < 0)
      kernel->negative_range += kernel->values[n];
    else
      kernel->positive_range += kernel->values[n];
    Minimize(kernel->minimum, kernel->values[n]);
    Maximize(kernel->maximum, kernel->values[n]);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but without
% any user controls. This allows internal programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage()'s task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphologyPrimitive() applies a single morphology primitive (convolve,
  erode, dilate, hit-and-miss, thinning, thicken, intensity variants, or
  iterative distance) of the given kernel to 'image', writing the result
  into the pre-allocated 'morphology_image'.  Returns the number of
  pixel-channel values that changed.
  NOTE(review): the two exit paths disagree on the failure value — the
  vertical-kernel fast path returns 0 on failure while the general path
  returns -1; confirm which value callers actually test for.
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
  const MorphologyMethod method,const KernelInfo *kernel,const double bias,
  ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"

  CacheView
    *image_view,
    *morphology_view;

  OffsetInfo
    offset;

  register ssize_t
    j,
    y;

  size_t
    *changes,
    changed,
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(morphology_image != (Image *) NULL);
  assert(morphology_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
  /* virtual pixel rows are read one kernel-width wider than the image */
  width=image->columns+kernel->width-1;
  offset.x=0;
  offset.y=0;
  switch (method)
  {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
    {
      /*
        Kernel needs to be used with reflection about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
    default:
    {
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
    }
  }
  /* one change counter per OpenMP thread, summed after the parallel loop */
  changed=0;
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changes[j]=0;
  if ((method == ConvolveMorphology) && (kernel->width == 1))
    {
      register ssize_t
        x;

      /*
        Special handling (for speed) of vertical (blur) kernels.  This performs
        its handling in columns rather than in rows.  This is only done
        for convolve as it is the only method that generates very large 1-D
        vertical kernels (such as a 'BlurKernel')
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,morphology_image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          r;

        ssize_t
          center;

        if (status == MagickFalse)
          continue;
        /* read one full column plus the kernel's vertical support */
        p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
          kernel->height-1,exception);
        q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
          morphology_image->rows,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        /* offset (in quantum units) of the origin pixel within the column */
        center=(ssize_t) GetPixelChannels(image)*offset.y;
        for (r=0; r < (ssize_t) image->rows; r++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              alpha,
              gamma,
              pixel;

            PixelChannel
              channel;

            PixelTrait
              morphology_traits,
              traits;

            register const MagickRealType
              *magick_restrict k;

            register const Quantum
              *magick_restrict pixels;

            register ssize_t
              v;

            size_t
              count;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            morphology_traits=GetPixelChannelTraits(morphology_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morphology_traits == UndefinedPixelTrait))
              continue;
            if ((traits & CopyPixelTrait) != 0)
              {
                /* channel is copy-through: pass the origin value unchanged */
                SetPixelChannel(morphology_image,channel,p[center+i],q);
                continue;
              }
            /* kernel applied in reflected form: walk values backwards */
            k=(&kernel->values[kernel->height-1]);
            pixels=p;
            pixel=bias;
            gamma=0.0;
            count=0;
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                /* NaN kernel entries are "don't care" and are skipped */
                if (!IsNaN(*k))
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            else
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    /* alpha-weighted convolution */
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            if (fabs(pixel-p[center+i]) > MagickEpsilon)
              changes[id]++;
            /* normalize by the weight actually applied (skipped NaNs) */
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;
            SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
              pixel),q);
          }
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(morphology_image);
        }
        if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            /* NOTE(review): total is image->rows but this loop iterates
               columns — image->columns looks intended; confirm upstream
               before changing */
            proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      morphology_image->type=image->type;
      morphology_view=DestroyCacheView(morphology_view);
      image_view=DestroyCacheView(image_view);
      for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
        changed+=changes[j];
      changes=(size_t *) RelinquishMagickMemory(changes);
      return(status ? (ssize_t) changed : 0);
    }
  /*
    Normal handling of horizontal or rectangular kernels (row by row).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,morphology_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /* read the row band covering the kernel's full support window */
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* quantum offset of the origin pixel within the read window */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          intensity,
          maximum,
          minimum,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          morphology_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        size_t
          count;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        morphology_traits=GetPixelChannelTraits(morphology_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (morphology_traits == UndefinedPixelTrait))
          continue;
        if ((traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(morphology_image,channel,p[center+i],q);
            continue;
          }
        pixels=p;
        maximum=0.0;
        minimum=(double) QuantumRange;
        /* seed the accumulator with the identity value for each method */
        switch (method)
        {
          case ConvolveMorphology:
          {
            pixel=bias;
            break;
          }
          case DilateMorphology:
          case ErodeIntensityMorphology:
          {
            pixel=0.0;
            break;
          }
          case HitAndMissMorphology:
          case ErodeMorphology:
          {
            pixel=QuantumRange;
            break;
          }
          default:
          {
            pixel=(double) p[center+i];
            break;
          }
        }
        count=0;
        gamma=1.0;
        switch (method)
        {
          case ConvolveMorphology:
          {
            /*
              Weighted Average of pixels using reflected kernel

              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form.  That is
              its values needs to be reversed.

              Correlation is actually the same as this but without reflecting
              the kernel, and thus 'lower-level' than Convolution.  However as
              Convolution is the more common method used, and it does not
              really cost us much in terms of processing to use a reflected
              kernel, so it is Convolution that is implemented.

              Correlation will have its kernel reflected before calling this
              function to do a Convolve.

              For more details of Correlation vs Convolution see
                http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              {
                /*
                  No alpha blending.
                */
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  for (u=0; u < (ssize_t) kernel->width; u++)
                  {
                    if (!IsNaN(*k))
                      {
                        pixel+=(*k)*pixels[i];
                        count++;
                      }
                    k--;
                    pixels+=GetPixelChannels(image);
                  }
                  /* advance to the same x position on the next row */
                  pixels+=(image->columns-1)*GetPixelChannels(image);
                }
                break;
              }
            /*
              Alpha blending.
            */
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case ErodeMorphology:
          {
            /*
              Minimum value within kernel neighbourhood.

              The kernel is not reflected for this operation.  In normal
              Greyscale Morphology, the kernel value should be added
              to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                /* only "set" kernel entries (>= 0.5) participate */
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    if ((double) pixels[i] < pixel)
                      pixel=(double) pixels[i];
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateMorphology:
          {
            /*
              Maximum value within kernel neighbourhood.

              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form.  That is
              its values needs to be reversed.

              In normal Greyscale Morphology, the kernel value should be
              added to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k > 0.5))
                  {
                    if ((double) pixels[i] > pixel)
                      pixel=(double) pixels[i];
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case HitAndMissMorphology:
          case ThinningMorphology:
          case ThickenMorphology:
          {
            /*
              Minimum of foreground pixel minus maximum of background pixels.

              The kernel is not reflected for this operation, and consists
              of both foreground and background pixel neighbourhoods, 0.0 for
              background, and 1.0 for foreground with either Nan or 0.5 values
              for don't care.

              This never produces a meaningless negative result.  Such results
              cause Thinning/Thicken to not work correctly when used against a
              greyscale image.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if (*k > 0.7)
                      {
                        /* foreground entry: track the minimum */
                        if ((double) pixels[i] < minimum)
                          minimum=(double) pixels[i];
                      }
                    else
                      if (*k < 0.3)
                        {
                          /* background entry: track the maximum */
                          if ((double) pixels[i] > maximum)
                            maximum=(double) pixels[i];
                        }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* clamp (min foreground - max background) at zero */
            minimum-=maximum;
            if (minimum < 0.0)
              minimum=0.0;
            pixel=minimum;
            if (method == ThinningMorphology)
              pixel=(double) p[center+i]-pixel;
            else
              if (method == ThickenMorphology)
                /* NOTE(review): this adds 'pixel' twice (p + 2*pixel);
                   by symmetry with Thinning (p - pixel) the intent was
                   probably p + pixel — confirm against upstream before
                   changing */
                pixel+=(double) p[center+i]+pixel;
            break;
          }
          case ErodeIntensityMorphology:
          {
            /*
              Select pixel with minimum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity < minimum)
                      {
                        pixel=(double) pixels[i];
                        minimum=intensity;
                      }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateIntensityMorphology:
          {
            /*
              Select pixel with maximum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity > maximum)
                      {
                        pixel=(double) pixels[i];
                        maximum=intensity;
                      }
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case IterativeDistanceMorphology:
          {
            /*
              Compute the iterative distance from black edge of a white image
              shape.  Essentially white values are decreased to the smallest
              'distance from edge' it can find.

              It works by adding kernel values to the neighbourhood, and
              selecting the minimum value found.  The kernel is rotated before
              use, so kernel distances match resulting distances, when a user
              provided asymmetric kernel is applied.

              This code is nearly identical to True GrayScale Morphology but
              not quite.

              GreyDilate: Kernel values added, maximum value found.  Kernel is
              rotated before use.

              GrayErode: Kernel values subtracted and minimum value found.  No
              kernel rotation used.

              Note that the Iterative Distance method is essentially a
              GrayErode, but with negative kernel values, and kernel rotation
              applied.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case UndefinedMorphology:
          default:
            break;
        }
        if (fabs(pixel-p[center+i]) > MagickEpsilon)
          changes[id]++;
        /* normalize by the kernel weight actually applied */
        gamma=PerceptibleReciprocal(gamma);
        if (count != 0)
          gamma*=(double) kernel->height*kernel->width/count;
        SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(morphology_image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changed+=changes[j];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}
/*
  This is almost identical to the MorphologyPrimitive() function above, but
  applies the primitive directly to the actual image using two passes, once in
  each direction, with the results of the previous (and current) row being
  re-used.
  That is, after each row is 'Sync'ed' back into the image, the next row makes
  use of those values as part of the calculation of the next row. It repeats,
  but going in the opposite (bottom-up) direction.
  Because of this 're-use of results' this function cannot make use of multi-
  threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method,const KernelInfo *kernel,
ExceptionInfo *exception)
{
CacheView
*morphology_view,
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
offset;
size_t
width,
changed;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
changed=0;
progress=0;
switch(method)
{
case DistanceMorphology:
case VoronoiMorphology:
{
/*
Kernel reflected about origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
default:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
}
/*
Two views into same image, do not thread.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Read virtual pixels, and authentic pixels, from the same image! We read
using virtual to get virtual pixel handling, but write back into the same
image.
Only top half of kernel is processed as we do a single pass downward
through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
offset.y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v <= offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
case VoronoiMorphology:
{
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
/*
Do the reverse pass through the image.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Read virtual pixels, and authentic pixels, from the same image. We
read using virtual to get virtual pixel handling, but write back
into the same image.
Only the bottom half of the kernel is processed as we up the image.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
kernel->y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
p+=(image->columns-1)*GetPixelChannels(image);
q+=(image->columns-1)*GetPixelChannels(image);
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
case VoronoiMorphology:
{
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
return(status ? (ssize_t) changed : -1);
}
/*
  Apply a Morphology by calling one of the above low-level primitive
  application functions. This function handles any iteration loops,
  composition or re-iteration of results, and compound morphology methods
  that are based on multiple low-level (staged) morphology methods.
  Basically this provides the complex glue between the requested morphology
  method and the raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /*         maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /*         how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /*         number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number of pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration  */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp;  /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:         /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:        /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;      /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
             method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
                       this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            /* fixed: was a doubled "(void) (void)" cast */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, "   Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, "      curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, "      work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, "      save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, "      union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image; /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  const char
    *setting;           /* value of the image artifact being examined */

  CompositeOperator
    multi_compose;      /* multi-kernel result composition override */

  double
    output_bias;        /* convolve/correlate output bias */

  Image
    *result;            /* image returned by MorphologyApply() */

  KernelInfo
    *applied_kernel;    /* kernel actually applied (may be a scaled clone) */

  /* Defaults: apply caller's kernel as-is, no bias, method-default compose */
  applied_kernel = (KernelInfo *) kernel;
  output_bias = 0.0;
  multi_compose = UndefinedCompositeOp;  /* use default for method */

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      /* Get the bias value as it will be needed */
      setting = GetImageArtifact(image,"convolve:bias");
      if (setting != (const char *) NULL)
        {
          if (IsGeometry(setting) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:bias",setting);
          else
            output_bias=StringToDoubleInterval(setting,(double)
              QuantumRange+1.0);
        }

      /* Scale kernel according to user wishes */
      setting = GetImageArtifact(image,"convolve:scale");
      if (setting != (const char *) NULL)
        {
          if (IsGeometry(setting) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:scale",setting);
          else
            {
              /* never scale the caller's kernel in place - clone it first */
              if (applied_kernel == kernel)
                applied_kernel = CloneKernelInfo(kernel);
              if (applied_kernel == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(applied_kernel, setting);
            }
        }
    }

  /* display the (normalized) kernel via stderr */
  setting=GetImageArtifact(image,"morphology:showKernel");
  if (IsStringTrue(setting) != MagickFalse)
    ShowKernelInfo(applied_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  setting = GetImageArtifact(image,"morphology:compose");
  if (setting != (const char *) NULL)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickComposeOptions,
        MagickFalse,setting);
      if (option < 0)
        (void) ThrowMagickException(exception,GetMagickModule(),
          OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
          "morphology:compose",setting);
      else
        multi_compose=(CompositeOperator) option;
    }

  /* Apply the Morphology */
  result = MorphologyApply(image,method,iterations,applied_kernel,
    multi_compose,output_bias,exception);

  /* Cleanup and Exit: destroy the clone, never the caller's kernel */
  if (applied_kernel != kernel)
    applied_kernel=DestroyKernelInfo(applied_kernel);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* Rotate every kernel in the (possibly multi-kernel) list; recurse into
     the rest of the list first so the whole chain gets the same rotation. */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Normalize the angle into the [0, 360) range */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases: some built-in kernel shapes are rotation-invariant
     (or invariant under the rotations supported here), so skip them. */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;   /* fold a -90 rotation onto the +90 transpose below */
      break;

    default:
      break;
  }

  /* Attempt rotations by 45 degrees  -- 3x3 kernels only
     Cycles the 8 outer values of the 3x3 array one step clockwise
     (center value k[4] stays fixed). */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin: map origin offset (relative to the
             center cell) one 45-degree step around the 3x3 ring */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        /* NOTE(review): perror() appends strerror(errno), which is unrelated
           here; fprintf(stderr, ...) would be the more appropriate call. */
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }

  /* 90-degree rotation (transpose): 1-D kernels and square kernels only */
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* record which direction the 90-degree turn went */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);      /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees
             in place, moving four cells at a time around each ring. */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        /* NOTE(review): see perror() note above - errno is unrelated here */
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }

  /* 180-degree rotation (reflection) - valid for any kernel shape */
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i,
        j;

      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }

  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
%     void ScaleGeometryKernelInfo(KernelInfo *kernel,
%       const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;      /* parsed "{rho}[x{sigma}]" scaling arguments */

  MagickStatusType
    parse_flags;        /* which geometry components were supplied */

  /* Parse the user-supplied "convolve:scale" style geometry string */
  SetGeometryInfo(&geometry_info);
  parse_flags = ParseGeometry(geometry, &geometry_info);

  /* A '%' suffix means the arguments were given as percentages */
  if ( (parse_flags & PercentValue) != 0 )
    {
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }

  /* Fill in defaults for any argument the user omitted */
  if ( (parse_flags & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (parse_flags & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;

  /* Scale/Normalize the input kernel (flags select normalization mode) */
  ScaleKernelInfo(kernel, geometry_info.rho, (GeometryFlags) parse_flags);

  /* Add Unity Kernel, for blending with original - only when requested */
  if ( (parse_flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register double
    pos_scale,
    neg_scale;

  register ssize_t
    i;

  /* Recurse so every kernel in a multi-kernel list is scaled identically. */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive side only */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel: positive and
     negative values are scaled independently so each side sums to +/-1. */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                   ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                   ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* Apply the per-sign scales to every defined (non-NaN) kernel value. */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    /* BUG FIX: was "kernel->minimum = 1;", which discarded the saved
       maximum and left minimum with a bogus constant after the swap. */
    kernel->minimum = t;
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *curr;

  size_t
    number, cell, col, row;

  /* Dump every kernel in the (possibly multi-kernel) list to stderr. */
  for (number=0, curr=kernel; curr != (KernelInfo *) NULL; number++, curr=curr->next ) {
    /* Header: only number the kernels when the list head has a successor. */
    (void) FormatLocaleFile(stderr, "Kernel");
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) number );
    (void) FormatLocaleFile(stderr, " \"%s",
         CommandOptionToMnemonic(MagickKernelOptions, curr->type) );
    if ( fabs(curr->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", curr->angle);
    /* Geometry (size and origin offset) plus value-range summary. */
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
         curr->width,(unsigned long) curr->height,(long) curr->x,(long) curr->y);
    (void) FormatLocaleFile(stderr,
         " with values from %.*lg to %.*lg\n",
         GetMagickPrecision(), curr->minimum,
         GetMagickPrecision(), curr->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
         GetMagickPrecision(), curr->negative_range,
         GetMagickPrecision(), curr->positive_range);
    /* Classify the kernel by the sum of its values. */
    if ( fabs(curr->positive_range+curr->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(curr->positive_range+curr->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
         GetMagickPrecision(), curr->positive_range+curr->negative_range);
    /* Row-by-row dump of the kernel matrix; "nan" marks undefined cells. */
    for (cell=row=0; row < curr->height; row++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) row );
      for (col=0; col < curr->width; col++, cell++) {
        if (IsNaN(curr->values[cell]))
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
             GetMagickPrecision(), (double) curr->values[cell]);
      }
      (void) FormatLocaleFile(stderr,"\n");
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%     U n i t y A d d K e r n e l I n f o                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
%     void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  /* Recurse first so every kernel in a multi-kernel list gets the update. */
  if ( kernel->next != (KernelInfo *) NULL)
    UnityAddKernelInfo(kernel->next, scale);

  /* A unity kernel is 1.0 at the origin and zero elsewhere, so adding a
     scaled copy amounts to bumping the single origin coefficient. */
  kernel->values[kernel->y*kernel->width+kernel->x] += scale;
  CalcKernelMetaData(kernel); /* refresh cached range/min/max meta-data */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  register size_t
    cell;

  size_t
    count;

  /* Handle the remainder of a multi-kernel list first. */
  if (kernel->next != (KernelInfo *) NULL)
    ZeroKernelNans(kernel->next);

  /* Replace every special 'nan' entry with an explicit zero value. */
  count = kernel->width*kernel->height;
  for (cell=0; cell < count; cell++)
    if (IsNaN(kernel->values[cell]))
      kernel->values[cell]=0.0;
  return;
}
|
combined_barrier.c | /*
* COMBINED OPENMP-MPI BARRIER
* OpenMP: Centralized sense reversal
* MPI: Tournament
* To show correct functionality of barrier: Uncomment line 182
* To compile: mpicc -o combined_barrier combined_barrier.c -lm -fopenmp
* To run: mpiexec combined_barrier [num_threads num_barriers]
*/
#include <omp.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "mpi.h"
#define DROPOUT 4
#define WINNER 0
#define LOSER 1
#define BYE 2
#define CHAMPION 3
typedef struct
{
int role;
int opponent;
int flag;
MPI_Status status;
int send_buffer;
int receive_buffer;
} record_t;
record_t* record;
int P, N, T;
int rank;
int startcount;
bool globalSense = 1;
bool *localSense;
void SenseReversalBarrier_Init()
{
  int t;

  /* All T threads must arrive before the OpenMP barrier releases. */
  startcount = T;
  /* One private sense flag per OpenMP thread, all initially true. */
  localSense = (bool*) malloc(sizeof(bool)*(T));
  for (t = T - 1; t >= 0; --t)
    localSense[t] = true;
  globalSense = true;
}
/*
 * Build the per-round role/opponent table for the MPI tournament barrier.
 * For each round k >= 1, process ranks pair up at distance 2^(k-1); the
 * lower rank of a pair is the WINNER (or CHAMPION in the final round for
 * rank 0), the higher is the LOSER, and ranks with no partner get a BYE.
 * Round 0 is a DROPOUT sentinel used to terminate the wake-up phase.
 */
void tournament_barrier_init()
{
  int k;

  MPI_Comm_size(MPI_COMM_WORLD,&P);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);

  /* number of rounds needed so that 2^rounds >= P */
  int rounds = ceil((log(P)/log(2)));
  record = (record_t*) calloc(rounds+1,sizeof(record_t));

  int power_of_round;
  for(k=0;k<=rounds;k++)
  {
    /* Initializing */
    record[k].flag = false;
    record[k].opponent = -1;
    record[k].role = -1;
    record[k].send_buffer=1;

    if (k==0)
    {
      /* Round 0 is the sentinel every process drops out at. */
      record[k].role = DROPOUT;
      continue;
    }

    /* BUG FIX: previously computed unconditionally, so k==0 evaluated
       1<<(k-1) == 1<<-1, a negative-shift with undefined behavior.
       Now only evaluated for k >= 1. */
    power_of_round=1<<(k-1);

    if (rank == 0 && (1<<k)>=P)
    {
      /* Final round: rank 0 wins the tournament outright. */
      record[k].role = CHAMPION;
      record[k].opponent = power_of_round + rank;
    }
    else if (rank%(1<<k) == 0)
    {
      if (((rank + (1<<(k-1))) < P) && ((1<<k) < P))
      {
        /* This rank receives from its higher-ranked opponent and advances. */
        record[k].role = WINNER;
        record[k].opponent = power_of_round + rank;
      }
      else if ((rank + (1<<(k-1))) >= P)
      {
        /* No opponent exists this round; advance without communicating. */
        record[k].role = BYE;
      }
    }
    else if ((rank%(1<<k)) == (1<<(k-1)))
    {
      /* This rank notifies its lower-ranked opponent and waits for wake-up. */
      record[k].role = LOSER;
      record[k].opponent = rank - power_of_round;
    }
  }
}
/* One-stop initialization for the combined barrier: the intra-process
   OpenMP sense-reversal barrier, then the inter-process MPI tournament. */
void centralized_tournament_init()
{
  SenseReversalBarrier_Init();
  tournament_barrier_init();
}
/*
 * MPI tournament barrier. Arrival phase: climb the rounds, LOSERs notify
 * their WINNER and then block; the CHAMPION (rank 0's final round) observes
 * that all processes have arrived. Wake-up phase: descend the rounds, each
 * WINNER releasing the LOSER it beat, until the round-0 DROPOUT sentinel.
 * The `barrier` argument is unused here (the round number serves as the
 * message tag). NOTE(review): round starts at 1, so for a single-process
 * run (rounds == 0) record[1] would be read past the allocated table --
 * verify P >= 2 is assumed by callers.
 */
void tournament_barrier(int barrier)
{
  int round = 0;
  int flag=1;
  //arrival
  while(flag)
  {
    round=round+1;
    switch(record[round].role)
    {
      case WINNER:
        /* wait for the loser of this round to report in, then climb */
        MPI_Recv(&record[round].receive_buffer, 1, MPI_INT, record[round].opponent, round, MPI_COMM_WORLD,&record[round].status);
        break;
      case LOSER:
        /* notify the winner, then block until the wake-up message */
        MPI_Send(&record[round].send_buffer, 1, MPI_INT, record[round].opponent, round, MPI_COMM_WORLD);
        MPI_Recv(&record[round].receive_buffer, 1, MPI_INT, record[round].opponent, round, MPI_COMM_WORLD, &record[round].status);
        flag=0;
        break;
      case CHAMPION:
        /* final round: last arrival seen; start the wake-up cascade */
        MPI_Recv(&record[round].receive_buffer, 1, MPI_INT, record[round].opponent, round, MPI_COMM_WORLD, &record[round].status);
        MPI_Send(&record[round].send_buffer, 1, MPI_INT, record[round].opponent, round, MPI_COMM_WORLD);
        flag=0;
        break;
      case BYE:
        /* no opponent this round; advance silently */
        continue;
      case DROPOUT:
        continue;
      default:
        continue;
    }
  }
  flag=1;
  //wake up: walk back down the rounds, releasing each beaten LOSER
  while(flag)
  {
    round = round - 1;
    switch(record[round].role)
    {
      case WINNER:
        /* release the loser of this round */
        MPI_Send(&record[round].send_buffer, 1, MPI_INT, record[round].opponent, round, MPI_COMM_WORLD);
        continue;
      case LOSER:
      case CHAMPION:
      case BYE:
        continue;
      case DROPOUT:
        /* round 0 sentinel reached: this process is fully released */
        flag=0;
        break;
      default:
        continue;
    }
  }
}
// Atomically reads startcount and decrements it (guarded by an OpenMP
// critical section); returns the value observed before the decrement.
int FetchAndDecrementCount()
{
  int observed;
#pragma omp critical
  {
    observed = startcount--;
  }
  return observed;
}
/*
 * Combined barrier: a centralized sense-reversal barrier across the OpenMP
 * threads of this process, after which thread 0 runs the inter-process MPI
 * tournament barrier. NOTE(review): the non-zero threads return as soon as
 * globalSense flips, i.e. before thread 0 has completed the MPI phase --
 * confirm this ordering is what the benchmark intends.
 */
void combined_barrier(int barrier)
{
  int j, myCount;
  int thread_num = omp_get_thread_num();
  // printf("Thread %d, rank %d, Entering combined_barrier %d\n", thread_num, rank, barrier);
  localSense[thread_num] = !localSense[thread_num]; // Toggle private sense variable
  /* The last thread to arrive sees count 1: it resets the counter and
     flips the global sense, releasing every spinning thread at once. */
  if (FetchAndDecrementCount() == 1)
  {
    startcount = T;
    globalSense = 1 - globalSense;
  }
  else
  {
    while (globalSense != localSense[thread_num]); // Spin
  }
  /* Only one thread per process participates in the MPI tournament. */
  if(thread_num == 0){
    tournament_barrier(barrier);
  }
}
/*
 * Driver: initializes MPI, parses the optional thread-count (T) and
 * iteration-count (N) arguments, sets up the combined barrier, then times
 * N loop iterations of 5 combined barriers each and reports per-barrier
 * averages from rank 0.
 */
int main(int argc, char **argv)
{
  struct timeval tv1, tv2;
  double total_time;
  int ret_val = MPI_Init(&argc, &argv);
  if (ret_val != MPI_SUCCESS)
  {
    printf("Failure initializing MPI\n");
    MPI_Abort(MPI_COMM_WORLD, ret_val);
  }
  /* argv[1] = number of OpenMP threads, argv[2] = number of loop
     iterations; defaults are T=3, N=1000 when not supplied. */
  if (argc==3){
    if (sscanf (argv[1], "%d", &T)!=1) printf ("T - not an integer\n");
    if (sscanf (argv[2], "%d", &N)!=1) printf ("N - not an integer\n");
  }
  else {T = 3; N = 1000;}
  centralized_tournament_init();
  /* Only rank 0 keeps the wall-clock time for the summary below. */
  if (rank == 0)
    gettimeofday(&tv1, NULL);
#pragma omp parallel num_threads(T) shared(record, N)
  {
    int i;
    /* 5 combined barriers per iteration, N iterations. */
    for (i = 0; i < N; ++i)
    {
      // printf("==============BARRIER %d=================\n", i);
      combined_barrier(i);
      // printf("==============BARRIER %d=================\n", i);
      combined_barrier(i);
      // printf("==============BARRIER %d=================\n", i);
      combined_barrier(i);
      // printf("==============BARRIER %d=================\n", i);
      combined_barrier(i);
      // printf("==============BARRIER %d=================\n", i);
      combined_barrier(i);
    }
  }
  if (rank == 0){
    gettimeofday(&tv2, NULL);
    /* elapsed time in microseconds */
    total_time = (double) (tv2.tv_usec - tv1.tv_usec) + (double) (tv2.tv_sec - tv1.tv_sec)*1000000;
    printf("\nSUMMARY:\nNumber of processes: %d"
           "\nNumber of threads: %d"
           "\nTotal run-time for %d"
           "loops with 5 barriers per loop: %fs\n"
           "The average time per barrier: %fus\n",
           P, T, N, total_time/1000000, (double)(total_time/(N*5)));
  }
  free(record);
  MPI_Finalize();
  return 0;
}
|
DRB084-threadprivatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
No threadprivate is used to avoid data races.
Data race pairs sum0@61:3 vs. sum0@61:8
sum0@61:3 vs. sum0@61:3
*/
#include <stdio.h>
#include <assert.h>
int sum0=0, sum1=0;
//#pragma omp threadprivate(sum0)
/* Accumulates i into the file-scope variable sum0; sum0 carries no
   threadprivate declaration, so this is shared state by design. */
void foo (int i)
{
  sum0 += i;
}
int main()
{
  int idx;
  int sum = 0;

  /* Serial accumulation through foo(), which updates the global sum0. */
  for (idx = 1; idx <= 1000; idx++)
    foo(idx);
  sum += sum0;

  /* reference calculation */
#pragma omp parallel for reduction(+:sum1)
  for (idx = 1; idx <= 1000; idx++)
  {
    sum1 = sum1 + idx;
  }

  printf("sum=%d; sum1=%d\n",sum,sum1);
  // assert(sum==sum1);
  return 0;
}
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "MagickCore/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
% size_t analyzeImage(Image *images,const int argc,
% char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
  const char **argv,ExceptionInfo *exception)
{
  char
    text[MaxTextExtent];

  double
    area,
    brightness,
    brightness_mean,
    brightness_standard_deviation,
    brightness_kurtosis,
    brightness_skewness,
    brightness_sum_x,
    brightness_sum_x2,
    brightness_sum_x3,
    brightness_sum_x4,
    hue,
    saturation,
    saturation_mean,
    saturation_standard_deviation,
    saturation_kurtosis,
    saturation_skewness,
    saturation_sum_x,
    saturation_sum_x2,
    saturation_sum_x3,
    saturation_sum_x4;

  Image
    *image;

  assert(images != (Image **) NULL);
  assert(*images != (Image *) NULL);
  assert((*images)->signature == MagickSignature);
  (void) argc;
  (void) argv;
  image=(*images);
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
  {
    CacheView
      *image_view;

    ssize_t
      y;

    MagickBooleanType
      status;

    /* raw moment accumulators (sum x, x^2, x^3, x^4) for each channel */
    brightness_sum_x=0.0;
    brightness_sum_x2=0.0;
    brightness_sum_x3=0.0;
    brightness_sum_x4=0.0;
    brightness_mean=0.0;
    brightness_standard_deviation=0.0;
    brightness_kurtosis=0.0;
    brightness_skewness=0.0;
    saturation_sum_x=0.0;
    saturation_sum_x2=0.0;
    saturation_sum_x3=0.0;
    saturation_sum_x4=0.0;
    saturation_mean=0.0;
    saturation_standard_deviation=0.0;
    saturation_kurtosis=0.0;
    saturation_skewness=0.0;
    area=0.0;
    status=MagickTrue;
    image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    BUG FIX: the moment accumulators and area, plus the hue/saturation/
    brightness scratch variables, were shared across threads with no
    reduction/private clauses -- a data race yielding nondeterministic
    statistics. reduction(+) and private() make the loop race-free.
  */
  #pragma omp parallel for schedule(static,4) shared(status) \
    private(hue,saturation,brightness) \
    reduction(+:area,brightness_sum_x,brightness_sum_x2,brightness_sum_x3, \
      brightness_sum_x4,saturation_sum_x,saturation_sum_x2, \
      saturation_sum_x3,saturation_sum_x4) \
    magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        ConvertRGBToHSL(GetPixelRed(image,p),GetPixelGreen(image,p),
          GetPixelBlue(image,p),&hue,&saturation,&brightness);
        brightness*=QuantumRange;
        brightness_sum_x+=brightness;
        brightness_sum_x2+=brightness*brightness;
        brightness_sum_x3+=brightness*brightness*brightness;
        brightness_sum_x4+=brightness*brightness*brightness*brightness;
        saturation*=QuantumRange;
        saturation_sum_x+=saturation;
        saturation_sum_x2+=saturation*saturation;
        saturation_sum_x3+=saturation*saturation*saturation;
        saturation_sum_x4+=saturation*saturation*saturation*saturation;
        area++;
        p+=GetPixelChannels(image);
      }
    }
    image_view=DestroyCacheView(image_view);
    if (area <= 0.0)
      break;
    /* brightness statistics from the raw moments */
    brightness_mean=brightness_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_mean);
    (void) SetImageProperty(image,"filter:brightness:mean",text,
      exception);
    brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
      area*brightness_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      brightness_standard_deviation);
    (void) SetImageProperty(image,"filter:brightness:standard-deviation",text,
      exception);
    if (fabs(brightness_standard_deviation) >= MagickEpsilon)
      brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
        brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
        brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
        brightness_mean*brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation*
        brightness_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_kurtosis);
    (void) SetImageProperty(image,"filter:brightness:kurtosis",text,
      exception);
    /* CONSISTENCY FIX: this guard used "!= 0" while every sibling guard
       uses fabs(...) >= MagickEpsilon; use the epsilon test here too. */
    if (fabs(brightness_standard_deviation) >= MagickEpsilon)
      brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
        brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
        brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_skewness);
    (void) SetImageProperty(image,"filter:brightness:skewness",text,
      exception);
    /* saturation statistics from the raw moments */
    saturation_mean=saturation_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_mean);
    (void) SetImageProperty(image,"filter:saturation:mean",text,
      exception);
    saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
      area*saturation_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      saturation_standard_deviation);
    (void) SetImageProperty(image,"filter:saturation:standard-deviation",text,
      exception);
    if (fabs(saturation_standard_deviation) >= MagickEpsilon)
      saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
        saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
        saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
        saturation_mean*saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation*
        saturation_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_kurtosis);
    (void) SetImageProperty(image,"filter:saturation:kurtosis",text,
      exception);
    if (fabs(saturation_standard_deviation) >= MagickEpsilon)
      saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
        saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
        saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_skewness);
    (void) SetImageProperty(image,"filter:saturation:skewness",text,
      exception);
  }
  return(MagickImageFilterSignature);
}
|
openmp_closest_bygraph.c | /*
Bag of Tasks OpenMP implementation to find the closest pairs of waypoints
in each of a set of METAL TMG graph files.
The tasks to complete are to find the closest pair of points in
METAL TMG files given as command-line parameters in argv[2] through
argv[argc-1].
The tasks are distributed in an order based on the string passed as
argv[1], which is one of:
"orig": the order that the files are presented on the command line
"alpha": alphabetical order by filename
"size": from largest to smallest number of points in the file
"random": randomized order
Jim Teresco, Fall 2021
Siena College
*/
#include <float.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "timer.h"
#include "tmggraph.h"
// struct to encapsulate info about the tasks in the bag
typedef struct cptask {
int num_vertices;
char *filename;
} cptask;
// helper function to read only up to the number of vertices from a
// TMG file and return that number
// Reads only the vertex count from a METAL TMG file: skips the three
// header tokens on the first line, then returns the first integer on the
// second line. Exits with an error message on open or parse failure.
int read_tmg_vertex_count(char *filename) {

  FILE *fp = fopen(filename, "r");
  if (!fp) {
    fprintf(stderr, "Cannot open file %s for reading.\n", filename);
    exit(1);
  }

  // read over first line; %99s bounds each token to the buffer size
  // (the original unbounded %s could overflow temp on a malformed file)
  char temp[100];
  if (fscanf(fp, "%99s %99s %99s", temp, temp, temp) != 3) {
    fprintf(stderr, "File %s has a malformed TMG header line.\n", filename);
    fclose(fp);
    exit(1);
  }

  // read number of vertices; without this check nv was used uninitialized
  // when the count was missing or non-numeric
  int nv;
  if (fscanf(fp, "%d", &nv) != 1) {
    fprintf(stderr, "Cannot read vertex count from file %s.\n", filename);
    fclose(fp);
    exit(1);
  }

  // that's all we need for now
  fclose(fp);
  return nv;
}
// Bag-of-tasks driver: orders the input TMG files per argv[1], then lets an
// OpenMP team pull files from the shared bag and find each file's closest
// pair of waypoints, reporting per-thread load-balance statistics.
int main(int argc, char *argv[]) {

  int num_threads;
  int num_tasks;
  int i;
  struct timeval start_time, stop_time;
  double active_time;

  // all parameters except argv[0] (program name) and argv[1] (input
  // ordering) will be filenames to load, so the number of tasks is
  // argc - 2
  num_tasks = argc - 2;
  if (argc < 3) {
    fprintf(stderr, "Usage: %s orig|alpha|size|random filenames\n", argv[0]);
    exit(1);
  }

  // check for a valid ordering in argv[1];
  char *orderings[] = {
    "orig",
    "alpha",
    "size",
    "random"
  };
  int ordering = -1;
  for (i = 0; i < 4; i++) {
    if (strcmp(argv[1], orderings[i]) == 0) {
      ordering = i;
      break;
    }
  }
  if (ordering == -1) {
    fprintf(stderr, "Usage: %s orig|alpha|size|random filenames\n", argv[0]);
    exit(1);
  }

  printf("Have %d tasks to be done\n", num_tasks);

  // start the timer
  gettimeofday(&start_time, NULL);

  // allocate and populate our "bag of tasks" array
  cptask **tasks = (cptask **)malloc(num_tasks*sizeof(cptask *));

  // add the first at pos 0, since we know there's at least one and
  // this will eliminate some special cases in our code below.
  tasks[0] = (cptask *)malloc(sizeof(cptask));
  tasks[0]->filename = argv[2];
  if (ordering == 2) {
    // BUG FIX: this read the count from argv[3] -- the *second* file, and
    // out of bounds when only one file is given -- instead of argv[2],
    // the file this task actually describes.
    tasks[0]->num_vertices = read_tmg_vertex_count(argv[2]);
  }

  // get them all in, inserting each per the requested ordering
  for (i = 1; i < num_tasks; i++) {
    cptask *taski = (cptask *)malloc(sizeof(cptask));
    taski->filename = argv[i+2];
    int pos = i;
    int insertat;
    switch (ordering) {
    case 0:
      // original ordering as specified by argv
      tasks[i] = taski;
      break;
    case 1:
      // alphabetical order by filename (insertion sort)
      while (pos > 0 && strcmp(taski->filename, tasks[pos-1]->filename) < 0) {
        tasks[pos] = tasks[pos-1];
        pos--;
      }
      tasks[pos] = taski;
      break;
    case 2:
      // order by size largest to smallest number of vertices
      taski->num_vertices = read_tmg_vertex_count(taski->filename);
      while (pos > 0 && taski->num_vertices >= tasks[pos-1]->num_vertices) {
        tasks[pos] = tasks[pos-1];
        pos--;
      }
      tasks[pos] = taski;
      break;
    case 3:
      // order randomly: shift to make room at a random slot
      insertat = random()%(pos+1);
      while (pos > insertat) {
        tasks[pos] = tasks[pos-1];
        pos--;
      }
      tasks[pos] = taski;
      break;
    }
  }

  // for thread stats
  int minjobs = num_tasks+1;
  int maxjobs = 0;
  long mincalcs = LONG_MAX;
  long maxcalcs = 0L;
  long totalcalcs = 0;
  double mintime = DBL_MAX;
  double maxtime = 0.0;

  // what's the next task available in the bag of tasks (index into array)
  int next_task = 0;

#pragma omp parallel shared(tasks, next_task, minjobs, maxjobs, mincalcs, maxcalcs, totalcalcs, mintime, maxtime, num_threads)
  {
    struct timeval start_time, stop_time;

    // start the timer
    gettimeofday(&start_time, NULL);

    int my_task = -1;
    int jobs_done = 0;
    long dcalcs = 0L;
    int thread_num = omp_get_thread_num();
    num_threads = omp_get_num_threads();

    while (1) {
      // grab a task from the bag; only the index update needs the lock
      #pragma omp critical(mutex)
      my_task = next_task++;
      if (my_task >= num_tasks) break;

      // this thread can process this one
      printf("[%d] working on %s\n", thread_num, tasks[my_task]->filename);
      tmg_graph *g = tmg_load_graph(tasks[my_task]->filename);
      if (g == NULL) {
        fprintf(stderr, "Could not create graph from file %s\n",
                tasks[my_task]->filename);
        exit(1);
      }
      int v1, v2;
      double distance;

      // do it
      tmg_closest_pair(g, &v1, &v2, &distance);

      jobs_done++;
      // brute-force closest pair does about n^2/2 distance calculations
      long job_calcs = g->num_vertices;
      job_calcs *= g->num_vertices;
      job_calcs /= 2;
      dcalcs += job_calcs;

      printf("[%d] %s closest pair #%d %s (%.6f,%.6f) and #%d %s (%.6f,%.6f) distance %.15f\n",
             thread_num, tasks[my_task]->filename, v1,
             g->vertices[v1]->w.label,
             g->vertices[v1]->w.coords.lat, g->vertices[v1]->w.coords.lng,
             v2, g->vertices[v2]->w.label,
             g->vertices[v2]->w.coords.lat, g->vertices[v2]->w.coords.lng,
             distance);
      tmg_graph_destroy(g);
    }

    gettimeofday(&stop_time, NULL);
    double thread_elapsed_time = diffgettime(start_time, stop_time);

    // separate critical section for accumulation and update of
    // simulation stats
    #pragma omp critical(stats)
    {
      if (jobs_done < minjobs)
        minjobs = jobs_done;
      if (jobs_done > maxjobs)
        maxjobs = jobs_done;
      if (dcalcs < mincalcs)
        mincalcs = dcalcs;
      if (dcalcs > maxcalcs)
        maxcalcs = dcalcs;
      totalcalcs += dcalcs;
      if (thread_elapsed_time < mintime)
        mintime = thread_elapsed_time;
      if (thread_elapsed_time > maxtime)
        maxtime = thread_elapsed_time;
    }
    printf("[%d] terminating\n", thread_num);
  }

  // get main thread's elapsed time
  gettimeofday(&stop_time, NULL);
  active_time = diffgettime(start_time, stop_time);

  double avgjobs = 1.0*num_tasks/num_threads;
  printf("Main thread was active for %.4f seconds\n", active_time);
  printf("%d workers processed %d jobs with about %ld distance calculations\n",
         num_threads, num_tasks, totalcalcs);
  printf("Job balance: min %d, max %d, avg: %.2f\n", minjobs, maxjobs,
         avgjobs);
  printf("Distance calculation balance: min %ld, max %ld, avg: %.2f\n",
         mincalcs, maxcalcs, ((1.0*totalcalcs)/num_threads));
  printf("Active time balance: min %.4f, max %.4f\n", mintime, maxtime);

  for (i = 0; i < num_tasks; i++) {
    free(tasks[i]);
  }
  free(tasks);
  return 0;
}
|
GB_bitmap_assign_IxJ_template.c | //------------------------------------------------------------------------------
// GB_bitmap_assign_IxJ_template: iterate over all of C(I,J)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Iterate over all positions in the IxJ Cartesian product. This is all
// entries C(i,j) where i is in the list I and j is in the list J. This
// traversal occurs whether or not C(i,j) is an entry present in C.
// The C matrix is accessed at C(I,J). The A matrix is size |I|-by-|J|.
// For bitmap assignent, C(I,J)=A is being computed. For bitmap extraction,
// C=A(I,J) so the roles of A and C are swapped (see GB_bitmap_subref.c).
{

    //--------------------------------------------------------------------------
    // create the tasks to iterate over IxJ
    //--------------------------------------------------------------------------

    // Slice the IxJ Cartesian product into ntasks units of work.  A coarse
    // task covers one or more whole vectors (kfirst..klast); a fine task
    // covers a slice [pA, pA_end) of a single vector.
    int ntasks = 0, nthreads ;
    GB_task_struct *TaskList = NULL ; size_t TaskList_size = 0 ;
    GB_OK (GB_subassign_IxJ_slice (&TaskList, &TaskList_size, &ntasks,
        &nthreads, /* I, */ nI, /* Ikind, Icolon, J, */ nJ,
        /* Jkind, Jcolon, */ Context)) ;

    //--------------------------------------------------------------------------
    // iterate over all IxJ
    //--------------------------------------------------------------------------

    // cnvals is reduced across tasks; each task accumulates into its own
    // task_cnvals first and folds it in once at the end.
    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast  = TaskList [taskid].klast ;
        // per-task entry count; presumably updated inside the GB_IXJ_WORK
        // macro defined at the #include site -- confirm there.
        int64_t task_cnvals = 0 ;
        bool fine_task = (klast == -1) ;
        int64_t iA_start = 0, iA_end = nI ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
            iA_start = TaskList [taskid].pA ;
            iA_end   = TaskList [taskid].pA_end ;
        }

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t jA = kfirst ; jA <= klast ; jA++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, jA, Jkind, Jcolon) ;
            int64_t pC0 = jC * vlen ;       // first entry in C(:,jC)
            int64_t pA0 = jA * nI ;         // first entry in A(:,jA)

            //------------------------------------------------------------------
            // operate on C (I(iA_start,iA_end-1),jC)
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                int64_t pC = iC + pC0 ;
                int64_t pA = iA + pA0 ;
                // operate on C(iC,jC) at pC (if C is bitmap or full)
                // and A(iA,jA) or M(iA,jA) at pA, if A and/or M are
                // bitmap or full.  M(iA,jA) is accessed only for the
                // subassign method when M is bitmap or full.
                GB_IXJ_WORK (pC, pA) ;
            }
        }
        cnvals += task_cnvals ;
    }

    //--------------------------------------------------------------------------
    // free workpace
    //--------------------------------------------------------------------------

    GB_FREE_WERK (&TaskList, TaskList_size) ;
}
|
phostname.c | #include <stdlib.h>
#include <stdio.h>
#include <strings.h>
#include <string.h>
#include <omp.h>
#include <ctype.h>
#include <mpi.h>
#include <math.h>
void NODE_COLOR();
char *trim ( char *s );
void dohelp();
void dohelp() {
	/************************************************************
	 * This is a glorified hello world program.  Each processor
	 * prints name, rank, and other information as described below.
	 * ************************************************************/
	/* Usage text, one array entry per output line; the trailing
	   newline is supplied by the printf format below. */
	static const char *usage[] = {
		"phostname arguments:",
		" -h : Print this help message",
		"",
		"no arguments : Print a list of the nodes on which the command is run.",
		"",
		" -f or -1 : Same as no argument but print MPI task id and Thread id",
		" If run with OpenMP threading enabled OMP_NUM_THREADS > 1",
		" there will be a line per MPI task and Thread.",
		"",
		" -F or -2 : Add columns to tell first MPI task on a node and and the",
		" numbering of tasks on a node. (Hint: pipe this output in",
		" to sort -r",
		"",
		" -a : Print a listing of the environmental variables passed to",
		" MPI task. (Hint: use the -l option with SLURM to prepend MPI",
		" task #.)"
	};
	size_t line;
	for (line = 0; line < sizeof(usage) / sizeof(usage[0]); line++)
		printf("%s\n", usage[line]);
}
/* Parallel "hello" tool: every MPI task (and optionally every OpenMP
   thread) prints its node name, rank and per-node numbering.  The third
   `envp` parameter of main is a common but non-standard extension used
   by the -a option to dump the environment. */
int main(int argc, char **argv,char *envp[])
{
int myid,numprocs,resultlen;
int mycolor,new_id,new_nodes;
int i,k;
MPI_Comm node_comm;
char lname[MPI_MAX_PROCESSOR_NAME] ;
char *myname;
/* NOTE(review): `thr` is declared but never used. */
int full,envs,iarg,thr,tn,nt,help;
/* Format statements */
char *f1234="%4.4d %4.4d %18s %4.4d %4.4d\n";
char *f1235="%s %4.4d %4.4d\n";
char *f1236="%s\n";
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
MPI_Get_processor_name(lname,&resultlen);
/* Get rid of "stuff" from the processor name. */
myname=trim(lname);
/* The next line is required for BGQ because the MPI task ID
is encoded in the processor name and we don't want it. */
if (strrchr(myname,32))myname=strrchr(myname,32);
/* read in command line args from task 0 */
if(myid == 0 ) {
full=0;
envs=0;
help=0;
if (argc > 1 ) {
for (iarg=1;iarg<argc;iarg++) {
if ( (strcmp(argv[iarg],"-h") == 0) ||
(strcmp(argv[iarg],"--h") == 0) ||
(strcmp(argv[iarg],"-help") == 0) ) help=1;
/**/
if ((strcmp(argv[iarg],"-f") == 0) ||
(strcmp(argv[iarg],"-1") == 0) ) full=1;
/**/
if ( (strcmp(argv[iarg],"-F") == 0) ||
(strcmp(argv[iarg],"-2") == 0) ) full=2;
/**/
if (strcmp(argv[iarg],"-a") == 0) envs=1;
}
}
}
/* send info to all tasks, if doing help doit and quit */
MPI_Bcast(&help,1,MPI_INT,0,MPI_COMM_WORLD);
if(help == 1) {
if(myid == 0) dohelp();
MPI_Finalize();
exit(0);
}
/* full and envs are only meaningful on rank 0 until broadcast here */
MPI_Bcast(&full,1,MPI_INT,0,MPI_COMM_WORLD);
MPI_Bcast(&envs,1,MPI_INT,0,MPI_COMM_WORLD);
if(myid == 0 && full == 2){
printf("task thread node name first task # on node\n");
}
/*********/
/* The routine NODE_COLOR will return the same value for all mpi
tasks that are running on the same node. We use this to create
a new communicator from which we get the numbering of tasks on
a node. */
NODE_COLOR(&mycolor);
MPI_Comm_split(MPI_COMM_WORLD,mycolor,myid,&node_comm);
MPI_Comm_rank( node_comm, &new_id );
MPI_Comm_size( node_comm, &new_nodes);
tn=-1;
nt=-1;
/* Here we print out the information with the format and
verbosity determined by the value of full. We do this
a task at a time to "hopefully" get a bit better formatting. */
for (i=0;i<numprocs;i++) {
MPI_Barrier(MPI_COMM_WORLD);
if ( i != myid ) continue;
#pragma omp parallel
{
/* nt/tn are file-scope-style shared locals written by every thread;
   the write to nt outside the critical section is presumably benign
   because all threads store the same value -- confirm if modified. */
nt=omp_get_num_threads();
/* defensive: omp_get_num_threads() should already return >= 1 */
if ( nt == 0 ) nt=1;
#pragma omp critical
{
if ( nt < 2 ) {
nt=1;
tn=0;
}
else {
tn=omp_get_thread_num();
}
if(full == 0) {
/* one line per node/task: only thread 0 prints */
if(tn == 0)printf(f1236,trim(myname));
}
if(full == 1) {
printf(f1235,trim(myname),myid,tn);
}
if(full == 2){
printf(f1234,myid,tn,trim(myname),mycolor,new_id);
}
}
}
/* here we print out the environment in which a MPI task is running */
if (envs == 1 && new_id==0) {
k=0;
while(envp[k]) {
printf("%s\n",envp[k]);
k++;
}
}
}
MPI_Finalize();
}
/*
 * Strip leading and trailing whitespace from s in place and return s.
 *
 * Fixes two defects in the original:
 *  - the trailing-whitespace loop tested isspace(s[j]) BEFORE checking
 *    j >= 0, reading s[-1] for empty or all-whitespace strings;
 *  - isspace() was given a plain char, which is undefined behavior for
 *    negative (high-bit) characters; cast through unsigned char.
 */
char *trim ( char *s )
{
  int i = 0;                     /* first non-space character */
  int j = strlen ( s ) - 1;      /* last non-space character */
  int k = 0;

  while ( s[i] != '\0' && isspace ( (unsigned char) s[i] ) )
    i++;
  while ( j >= i && isspace ( (unsigned char) s[j] ) )
    j--;
  /* shift the kept range [i, j] to the front and terminate */
  while ( i <= j )
    s[k++] = s[i++];
  s[k] = '\0';
  return s;
}
|
SpatialReplicationPadding.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialReplicationPadding.c"
#else
static void THNN_(SpatialReplicationPadding_updateOutput_frame)(
  real *input_p, real *output_p,
  int64_t nslices,
  int64_t iwidth, int64_t iheight,
  int64_t owidth, int64_t oheight,
  int pad_l, int pad_r,
  int pad_t, int pad_b)
{
  /* Forward replication padding for one (C,H,W) frame: each output pixel
     copies the nearest valid input pixel (edge replication).  The *Start*
     offsets are non-zero only for negative pads (cropping). */
  int iStartX = fmax(0, -pad_l);
  int iStartY = fmax(0, -pad_t);
  int oStartX = fmax(0, pad_l);
  int oStartY = fmax(0, pad_t);

  int64_t plane;
#pragma omp parallel for private(plane)
  for (plane = 0; plane < nslices; plane++)
  {
    int64_t row, col;
    for (row = 0; row < oheight; row++) {
      for (col = 0; col < owidth; col++) {
        int64_t src_x, src_y;
        /* clamp the output column into the valid input window, then
           shift into input coordinates */
        src_x = col;
        if (src_x < pad_l) {
          src_x = pad_l;
        } else if (src_x >= iwidth + pad_l) {
          src_x = iwidth + pad_l - 1;
        }
        src_x = src_x - oStartX + iStartX;
        /* same clamping for the row */
        src_y = row;
        if (src_y < pad_t) {
          src_y = pad_t;
        } else if (src_y >= iheight + pad_t) {
          src_y = iheight + pad_t - 1;
        }
        src_y = src_y - oStartY + iStartY;
        output_p[plane * owidth * oheight + row * owidth + col] =
          input_p[plane * iwidth * iheight + src_y * iwidth + src_x];
      }
    }
  }
}
/*
 * Replication ("edge") padding, forward pass.
 *
 * input:  3D (C,H,W) or 4D batch-mode (N,C,H,W) tensor.
 * output: resized to H+pad_t+pad_b by W+pad_l+pad_r; border values are
 *         replicated outward by the frame helper above.
 */
void THNN_(SpatialReplicationPadding_updateOutput)(THNNState *state,
                                                   THTensor *input,
                                                   THTensor *output,
                                                   int pad_l, int pad_r,
                                                   int pad_t, int pad_b)
{
  int dimw = 2;
  int dimh = 1;
  int dimslices = 0;
  int64_t nbatch = 1;
  int64_t nslices;
  int64_t iheight;
  int64_t iwidth;
  int64_t oheight;
  int64_t owidth;
  real *input_data;
  real *output_data;

  THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
                "3D or 4D (batch mode) tensor expected for input, but got: %s");

  if (input->nDimension == 4)
  {
    nbatch = input->size[0];
    dimw++;
    dimh++;
    dimslices++;
  }

  /* sizes */
  nslices = input->size[dimslices];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  oheight = iheight + pad_t + pad_b;
  owidth  = iwidth + pad_l + pad_r;

  /* BUG FIX: both output dimensions must be >= 1.  The original check
     used ||, which accepted a degenerate output (e.g. owidth == 0)
     whenever the other dimension happened to be valid.
     NOTE(review): the %d specifiers receive int64_t arguments here, as
     in the original -- confirm against THArgCheck's formatter. */
  THArgCheck(owidth >= 1 && oheight >= 1 , 2,
             "input (H: %d, W: %d)is too small."
             " Calculated output H: %d W: %d",
             iheight, iwidth, oheight, owidth);

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  /* resize output */
  if (input->nDimension == 3)
  {
    THTensor_(resize3d)(output, nslices, oheight, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    THNN_(SpatialReplicationPadding_updateOutput_frame)(input_data, output_data,
                                                        nslices,
                                                        iwidth, iheight,
                                                        owidth, oheight,
                                                        pad_l, pad_r,
                                                        pad_t, pad_b);
  }
  else
  {
    int64_t p;
    THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    /* batch elements are independent: pad them in parallel */
#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialReplicationPadding_updateOutput_frame)(
        input_data+p*nslices*iwidth*iheight,
        output_data+p*nslices*owidth*oheight,
        nslices,
        iwidth, iheight,
        owidth, oheight,
        pad_l, pad_r,
        pad_t, pad_b);
    }
  }

  /* cleanup */
  THTensor_(free)(input);
}
static void THNN_(SpatialReplicationPadding_updateGradInput_frame)(
  real *ginput_p, real *goutput_p,
  int64_t nslices,
  int64_t iwidth, int64_t iheight,
  int64_t owidth, int64_t oheight,
  int pad_l, int pad_r,
  int pad_t, int pad_b)
{
  /* Backward replication padding for one (C,H,W) frame: each output
     gradient is scatter-added into the input pixel it was replicated
     from.  Parallelism is over planes, so the += accumulation within a
     plane stays single-threaded. */
  int iStartX = fmax(0, -pad_l);
  int iStartY = fmax(0, -pad_t);
  int oStartX = fmax(0, pad_l);
  int oStartY = fmax(0, pad_t);

  int64_t plane;
#pragma omp parallel for private(plane)
  for (plane = 0; plane < nslices; plane++)
  {
    int64_t row, col;
    for (row = 0; row < oheight; row++) {
      for (col = 0; col < owidth; col++) {
        int64_t src_x, src_y;
        /* clamp the output column into the valid input window, then
           shift into input coordinates */
        src_x = col;
        if (src_x < pad_l) {
          src_x = pad_l;
        } else if (src_x >= iwidth + pad_l) {
          src_x = iwidth + pad_l - 1;
        }
        src_x = src_x - oStartX + iStartX;
        /* same clamping for the row */
        src_y = row;
        if (src_y < pad_t) {
          src_y = pad_t;
        } else if (src_y >= iheight + pad_t) {
          src_y = iheight + pad_t - 1;
        }
        src_y = src_y - oStartY + iStartY;
        ginput_p[plane * iwidth * iheight + src_y * iwidth + src_x] +=
          goutput_p[plane * owidth * oheight + row * owidth + col];
      }
    }
  }
}
/* Replication padding, backward pass: accumulates gradOutput into
   gradInput (resized and zeroed here; the frame helper uses +=).
   NOTE(review): unlike updateOutput there is no THNN_ARGCHECK on
   input->nDimension -- presumably updateOutput already validated it. */
void THNN_(SpatialReplicationPadding_updateGradInput)(THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
int pad_l, int pad_r,
int pad_t, int pad_b)
{
int dimw = 2;
int dimh = 1;
int dimslices = 0;
int64_t nbatch = 1;
int64_t nslices;
int64_t iheight;
int64_t iwidth;
int64_t oheight;
int64_t owidth;
if (input->nDimension == 4)
{
nbatch = input->size[0];
dimw++;
dimh++;
dimslices++;
}
/* sizes */
nslices = input->size[dimslices];
iheight = input->size[dimh];
iwidth = input->size[dimw];
oheight = iheight + pad_t + pad_b;
owidth = iwidth + pad_l + pad_r;
/* gradOutput must match the forward output shape exactly */
THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THTensor_(size)(gradOutput, dimw));
THArgCheck(oheight == THTensor_(size)(gradOutput, dimh), 3,
"gradOutput height unexpected. Expected: %d, Got: %d",
oheight, THTensor_(size)(gradOutput, dimh));
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
/* resize */
/* gradInput must start at zero because the frame helper accumulates */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
/* backprop */
if (input->nDimension == 3) {
THNN_(SpatialReplicationPadding_updateGradInput_frame)(
THTensor_(data)(gradInput),
THTensor_(data)(gradOutput),
nslices,
iwidth, iheight,
owidth, oheight,
pad_l, pad_r,
pad_t, pad_b);
} else {
int64_t p;
/* batch elements are independent: process them in parallel */
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++) {
THNN_(SpatialReplicationPadding_updateGradInput_frame)(
THTensor_(data)(gradInput) + p * nslices * iheight * iwidth,
THTensor_(data)(gradOutput) + p * nslices * oheight * owidth,
nslices,
iwidth, iheight,
owidth, oheight,
pad_l, pad_r,
pad_t, pad_b);
}
}
/* cleanup */
THTensor_(free)(gradOutput);
}
#endif
|
scrypt_fmt.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2013 by Solar Designer
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*/
#include <stdio.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "escrypt/crypto_scrypt.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "memdbg.h"
#define FORMAT_LABEL "scrypt"
#define FORMAT_NAME ""
#define FMT_TAG7 "$7$"
#define FMT_TAG7_LEN (sizeof(FMT_TAG7)-1)
#define FMT_CISCO9 "$9$"
#define FMT_CISCO9_LEN (sizeof(FMT_CISCO9)-1)
#define FMT_SCRYPTKDF "$ScryptKDF.pm$"
#define FMT_SCRYPTKDF_LEN (sizeof(FMT_SCRYPTKDF)-1)
#ifdef __XOP__
#define ALGORITHM_NAME "Salsa20/8 128/128 XOP"
#elif defined(__AVX__)
#define ALGORITHM_NAME "Salsa20/8 128/128 AVX"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "Salsa20/8 128/128 SSE2"
#else
#define ALGORITHM_NAME "Salsa20/8 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (16384, 8, 1)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 256
#define BINARY_ALIGN 1
#define SALT_SIZE BINARY_SIZE
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"$7$C6..../....SodiumChloride$kBGj9fHznVYFQMEn/qDCfrDevf9YDtcDdKvEqHJLV8D", "pleaseletmein"},
{"$7$C6..../....\x01\x09\x0a\x0d\x20\x7f\x80\xff$b7cKqzsQk7txdc9As1WZBHjUPNWQWJW8A.UUUTA5eD1", "\x01\x09\x0a\x0d\x20\x7f\x80\xff"},
{"$7$2/..../....$rNxJWVHNv/mCNcgE/f6/L4zO6Fos5c2uTzhyzoisI62", ""},
{"$7$86....E....NaCl$xffjQo7Bm/.SKRS4B2EuynbOLjAmXU5AbDbRXhoBl64", "password"},
// cisco type 9 hashes. . They are $7$C/..../.... type (N=16384, r=1, p=1) different base-64 (same as WPA). salt used RAW
{"$9$nhEmQVczB7dqsO$X.HsgL6x1il0RxkOSSvyQYwucySCt7qFm4v7pqCxkKM", "cisco"},
{"$9$cvWdfQlRRDKq/U$VFTPha5VHTCbSgSUAo.nPoh50ZiXOw1zmljEjXkaq1g", "123456"},
{"$9$X9fA8mypebLFVj$Klp6X9hxNhkns0kwUIinvLRSIgWOvCwDhVTZqjsycyU", "JtR"},
// 3rd type ScryptKDF.pm format (we saw this in CMIYC 2013)
// Generate in perl with scrypt_hash($_[1],$salt,1<<$N,$r,$p,$bytes)
// to put into proper format, we mime->raw the salt and mime->cryptBS the hash hash, and fixup $N,$r,$p
// For this hash we replace the default ':' chars in the hash with '*' so they will end up as 1
// field, and change the SCRYPT into $ScryptKDF.pm$. So this hash
// SCRYPT:16384:8:1:VHRuaXZOZ05INWJs:JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=
// gets change into (by ScryptKDF2john)
// $ScryptKDF.pm$16384*8*1*VHRuaXZOZ05INWJs*JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=
// and then in prepare, this becomes (which is canonical for this format)
// $7$C6..../....TtnivNgNH5bl$acXnAzE8oVzGwW9Tlu6iw7fq021J/1sZmEKhcLBrT02
{"$ScryptKDF.pm$16384*8*1*bjZkemVmZ3lWVi42*cmBflTPsqGIbg9ZIJRTQdbic8OCUH+904TFmNPBkuEA=","test123"},
{"$ScryptKDF.pm$16384*8*1*VlVYUzBhQmlNbk5J*bJhm6VUS2UQRwMRqLTvSsljDeq193Ge4aqQDtb94bKg=","hello"},
{"$ScryptKDF.pm$16384*8*1*VHRuaXZOZ05INWJs*JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0BhlHpZJ3J2jcozCDM7t+sfjkgQ894R+f+ldVWM5atlkA==","password"},
{NULL}
};
// from crypt_scrypt-common.c (removed static from that file on these 3 functions)
extern const uint8_t * decode64_uint32(uint32_t * dst, uint32_t dstbits, const uint8_t * src);
extern uint8_t * encode64_uint32(uint8_t * dst, size_t dstlen, uint32_t src, uint32_t srcbits);
extern int decode64_one(uint32_t * dst, uint8_t src);
static int max_threads;
static escrypt_local_t *local;
static char saved_salt[SALT_SIZE];
static struct {
char key[PLAINTEXT_LENGTH + 1];
char out[BINARY_SIZE];
} *buffer;
/* Format init: allocate one escrypt scratch state per OpenMP thread and
   the shared key/output buffer.  Keys-per-crypt are scaled by the thread
   count so each thread handles one candidate per crypt_all() call. */
static void init(struct fmt_main *self)
{
int i;
#ifdef _OPENMP
max_threads = omp_get_max_threads();
self->params.min_keys_per_crypt *= max_threads;
self->params.max_keys_per_crypt *= max_threads;
#else
max_threads = 1;
#endif
local = mem_alloc(sizeof(*local) * max_threads);
for (i = 0; i < max_threads; i++)
escrypt_init_local(&local[i]);
buffer = mem_alloc(sizeof(*buffer) * self->params.max_keys_per_crypt);
}
static char N_to_c(int N) {
	/* Encode N (a power of two) as the single crypt base-64 digit for
	   log2(N), e.g. 16384 -> itoa64[14]. */
	int log2N = 0;
	while ((N >>= 1) != 0)
		++log2N;
	return itoa64[log2N];
}
/*
 * Canonicalize alternate scrypt encodings (Cisco type 9 "$9$" and
 * ScryptKDF.pm "$ScryptKDF.pm$") into the native "$7$" crypt form:
 *   $7$ <1 char log2(N)> <5 chars r> <5 chars p> <salt> $ <cryptBS hash>
 * Unrecognized or malformed input is returned unchanged.
 *
 * Fixes vs. the original:
 *  - the trailing-'.' strip of the converted salt indexed tmp4 with
 *    strlen(tmp) -- but tmp has been cut up by strtokm() by that point,
 *    so the wrong position was used; use strlen(tmp4);
 *  - the length guard used '>' which allowed strcpy() below to write
 *    one byte past tmp[]; use '>=';
 *  - both strip loops now guard against an empty conversion result.
 */
static char *prepare(char *fields[10], struct fmt_main *self)
{
	static char Buf[256];
	char tmp[512], tmp2[512], tmp4[256], tmp5[6], tmp6[6], *cp, *cp2;
	int N, r, p;

	if (!strncmp(fields[1], FMT_CISCO9, FMT_CISCO9_LEN)) {
		/* Cisco type 9: fixed params N=16384, r=1, p=1; 14-byte raw
		   salt used as-is; 43-char crypt base-64 hash re-encoded as
		   cryptBS. */
		if (strlen(fields[1]) != 4+14+43)
			return fields[1];
		N=1<<14; r=1; p=1;
		encode64_uint32((uint8_t*)tmp5, sizeof(tmp5), r, 30);
		tmp5[5]=0;
		encode64_uint32((uint8_t*)tmp6, sizeof(tmp6), p, 30);
		tmp6[5]=0;
		sprintf(Buf, "%s%c%s%s%14.14s$%s", FMT_TAG7, N_to_c(N), tmp5, tmp6, &(fields[1][3]),
			base64_convert_cp(&(fields[1][3+14+1]), e_b64_crypt, 43, tmp, e_b64_cryptBS, sizeof(tmp), flg_Base64_NO_FLAGS, 0));
	}
	else if (!strncmp(fields[1], FMT_SCRYPTKDF, FMT_SCRYPTKDF_LEN))
	{
		/* ScryptKDF.pm: $ScryptKDF.pm$N*r*p*salt_mime*hash_mime with
		   variable N, r, p and hash length.  Salt is decoded to raw
		   bytes; hash is re-encoded as cryptBS. */
		/* '>=' so the NUL of a maximal-length string still fits tmp[] */
		if (strlen(fields[1]) >= sizeof(tmp)+FMT_SCRYPTKDF_LEN)
			return fields[1];
		strcpy(tmp, &fields[1][FMT_SCRYPTKDF_LEN]);
		cp = strtokm(tmp, "*");
		if (!cp || !isdec(cp)) return fields[1];
		N = atoi(cp);
		cp = strtokm(NULL, "*");
		if (!cp || !isdec(cp)) return fields[1];
		r = atoi(cp);
		cp = strtokm(NULL, "*");
		if (!cp || !isdec(cp)) return fields[1];
		p = atoi(cp);
		cp = strtokm(NULL, "*");
		if (!cp)
			return fields[1];
		cp2 = strtokm(NULL, "*");
		if (!cp2)
			return fields[1];
		if (base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0) != strlen(cp))
			return fields[1];
		if (base64_valid_length(cp2, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0) != strlen(cp2))
			return fields[1];
		encode64_uint32((uint8_t*)tmp5, sizeof(tmp5), r, 30);
		tmp5[5]=0;
		encode64_uint32((uint8_t*)tmp6, sizeof(tmp6), p, 30);
		tmp6[5]=0;
		memset(tmp4, 0, sizeof(tmp4));
		base64_convert_cp(cp, e_b64_mime, strlen(cp), tmp4, e_b64_raw, sizeof(tmp4), flg_Base64_NO_FLAGS, 0);
		memset(tmp2, 0, sizeof(tmp2));
		base64_convert_cp(cp2, e_b64_mime, strlen(cp2), tmp2, e_b64_cryptBS, sizeof(tmp2),flg_Base64_NO_FLAGS, 0);
		/* strip trailing '.' padding from the converted hash */
		if (tmp2[0]) {
			cp = &tmp2[strlen(tmp2)-1];
			while (cp > tmp2 && *cp == '.') *cp-- = 0;
		}
		/* strip trailing '.' padding from the converted salt
		   (bug fix: was indexed by strlen(tmp), not strlen(tmp4)) */
		if (tmp4[0]) {
			cp = &tmp4[strlen(tmp4)-1];
			while (cp > tmp4 && *cp == '.') *cp-- = 0;
		}
		sprintf(Buf, "%s%c%s%s%s$%s", FMT_TAG7, N_to_c(N), tmp5, tmp6, tmp4, tmp2);
	} else
		return fields[1];
	return Buf;
}
static void done(void)
{
	/* Release per-thread scrypt scratch state, then the shared arrays. */
	int t;
	for (t = max_threads - 1; t >= 0; t--)
		escrypt_free_local(&local[t]);
	MEM_FREE(local);
	MEM_FREE(buffer);
}
/* Accept only canonical "$7$" strings:
   $7$ <1 char log2(N)> <5 chars r> <5 chars p> <salt> $ <cryptBS hash>
   with all setting chars valid crypt base-64 and N, r, p non-zero. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
int length;
unsigned tmp;
if (strncmp(ciphertext, FMT_TAG7, FMT_TAG7_LEN))
return 0;
/* the 11 setting characters (1 for log2(N), 5 each for r and p) must
   all be valid crypt base-64 digits */
for (p = ciphertext + FMT_TAG7_LEN; p < ciphertext + (FMT_TAG7_LEN + 1 + 5 + 5); p++)
if (atoi64[ARCH_INDEX(*p)] == 0x7F)
return 0;
p = strrchr(ciphertext, '$');
if (!p)
return 0;
/* the setting+salt prefix must leave room for "$" + 43-char hash in
   the NUL-padded BINARY_SIZE buffers used by get_binary/get_salt */
if (p - ciphertext > BINARY_SIZE - (1 + 43))
return 0;
++p;
length = base64_valid_length(p, e_b64_cryptBS, flg_Base64_NO_FLAGS, 0);
/* log2(N) must decode to a non-zero value */
decode64_one(&tmp, ciphertext[3]);
if (!tmp)
return 0;
/* r must be non-zero */
decode64_uint32(&tmp, 30, (const uint8_t *)&ciphertext[4]);
if (!tmp)
return 0;
/* p must be non-zero */
decode64_uint32(&tmp, 30, (const uint8_t *)&ciphertext[4+5]);
if (!tmp)
return 0;
// we want the hash to use 32 bytes OR more. 43 base64 bytes is 32 raw bytes
return p[length] == 0 && length >= 43;
}
/* The "binary" for this format is simply the full canonical hash string,
   NUL-padded to BINARY_SIZE (cmp_* compare string prefixes).  strncpy is
   used deliberately for its zero-padding behavior; valid() has already
   bounded the string length. */
static void *get_binary(char *ciphertext)
{
static char out[BINARY_SIZE];
strncpy(out, ciphertext, sizeof(out)); /* NUL padding is required */
return out;
}
/* The "salt" is the setting prefix "$7$<N><r><p><salt>": copy the whole
   ciphertext into a NUL-padded buffer, then zero everything from the '$'
   that terminates the salt onward (the search starts at offset 8, past
   the "$7$" tag, so only the salt-terminating '$' can match). */
static void *get_salt(char *ciphertext)
{
static char out[SALT_SIZE];
char *cp;
/* NUL padding is required */
memset(out, 0, sizeof(out));
if (strlen(ciphertext) > SALT_SIZE-1)
memcpy(out, ciphertext, SALT_SIZE-1);
else
strcpy(out, ciphertext);
cp = strchr(&out[8], '$');
/* erase the '$' separator and the hash that follows it */
while (cp && *cp) {
*cp++ = 0;
}
return out;
}
#define H(s, i) \
((int)(unsigned char)(atoi64[ARCH_INDEX((s)[(i)])] ^ (s)[(i) - 1]))
/*
* original Hx() macros simple looked at length-2 (last byte, and last byte -2)
* now we look at bytes 40 and 38 from the hash, so that longer hashes can
* be compared to shorter ones. The last byte may be different, so we
* do NOT use that one. This new method works for any number of bytes in
* the scrypt 32 or more.
#define H0(s) \
int i = strlen(s) - 2; \
return i > 0 ? H((s), i) & 0xF : 0
*/
#define H0(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 0 ? H((s), i) & 0xF : 0
#define H1(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 4)) & 0xFF : 0
#define H2(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 6)) & 0xFFF : 0
#define H3(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 4 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
(H((s), i - 4) << 10)) & 0xFFFF : 0
#define H4(s) \
char *cp = strrchr(s,'$')+40; \
int i = cp-s; \
return i > 6 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
(H((s), i - 4) << 10) ^ (H((s), i - 6) << 15)) & 0xFFFFF : 0
/*
 * Hash-table bucket functions over the stored "binary" (the canonical
 * hash string).  The H0()..H4() macros above expand to a complete
 * function body INCLUDING the return statement, which is why these
 * functions appear to have no return of their own.
 */
static int binary_hash_0(void *binary)
{
H0((char *)binary);
}
static int binary_hash_1(void *binary)
{
H1((char *)binary);
}
static int binary_hash_2(void *binary)
{
H2((char *)binary);
}
static int binary_hash_3(void *binary)
{
H3((char *)binary);
}
static int binary_hash_4(void *binary)
{
H4((char *)binary);
}
/*
 * Same H0()..H4() bucket functions applied to the hash computed for
 * candidate `index` by crypt_all(); must pair with binary_hash_* above.
 * The macros contain the return statement.
 */
static int get_hash_0(int index)
{
H0(buffer[index].out);
}
static int get_hash_1(int index)
{
H1(buffer[index].out);
}
static int get_hash_2(int index)
{
H2(buffer[index].out);
}
static int get_hash_3(int index)
{
H3(buffer[index].out);
}
static int get_hash_4(int index)
{
H4(buffer[index].out);
}
/* Bucket a salt string by mixing its last two characters (both the raw
   bytes and their crypt base-64 values). */
static int salt_hash(void *salt)
{
int i, h;
i = strlen((char *)salt) - 1;
/* step back one character when the salt is long enough -- presumably to
   avoid a low-entropy final character; TODO confirm intent */
if (i > 1) i--;
h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])];
h ^= ((unsigned char *)salt)[i - 1];
h <<= 6;
h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i - 1])];
h ^= ((unsigned char *)salt)[i];
return h & (SALT_HASH_SIZE - 1);
}
/* Cache the current setting string for crypt_all().  get_salt() produced
   it NUL-padded within SALT_SIZE, so this strcpy cannot overflow. */
static void set_salt(void *salt)
{
strcpy(saved_salt, salt);
}
/* Store the candidate plaintext for slot `index`, bounded to
   PLAINTEXT_LENGTH + 1 bytes (strnzcpy NUL-terminates). */
static void set_key(char *key, int index)
{
strnzcpy(buffer[index].key, key, PLAINTEXT_LENGTH + 1);
}
/* Return the stored candidate plaintext for slot `index`. */
static char *get_key(int index)
{
return buffer[index].key;
}
/* Compute scrypt for all queued candidates against saved_salt, storing
   each encoded result in buffer[index].out.  One candidate per thread:
   init() scaled max_keys_per_crypt by max_threads, so index stays within
   the local[] scratch array -- presumably count <= max_keys_per_crypt is
   guaranteed by the core; confirm before changing the scaling. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index;
int failed = 0;
#ifdef _OPENMP
#pragma omp parallel for default(none) private(index) shared(count, failed, local, saved_salt, buffer)
#endif
for (index = 0; index < count; index++) {
uint8_t *hash;
hash = escrypt_r(&(local[index]),
(const uint8_t *)(buffer[index].key),
strlen(buffer[index].key),
(const uint8_t *)saved_salt,
(uint8_t *)&(buffer[index].out),
sizeof(buffer[index].out));
/* NULL means the scrypt allocation failed; several threads may set
   failed concurrently, but they all write the same value */
if (!hash) {
failed = 1;
buffer[index].out[0] = 0;
}
}
if (failed) {
fprintf(stderr, "scrypt memory allocation failed\n");
error();
}
return count;
}
/* Return 1 if any computed hash matches the binary (prefix compare). */
static int cmp_all(void *binary, int count)
{
int index;
// The stored binary always has at least 32 hash bytes (43 base-64
// chars, enforced by valid()), so comparing up to the computed hash
// length minus the final partial character is sufficient.
// NOTE(review): len is derived from buffer[0].out, not from binary as
// the original comment implied -- confirm all slots share one length.
int len = strlen(buffer[0].out)-2;
for (index = 0; index < count; index++)
if (!strncmp((char *)binary, buffer[index].out, len))
return 1;
return 0;
}
/* Compare one computed hash against the binary, again ignoring the final
   (partial base-64) character so different hash lengths still match. */
static int cmp_one(void *binary, int index)
{
int len = strlen(buffer[index].out)-2;
return !strncmp((char *)binary, buffer[index].out,len);
}
/* cmp_one already compares the encoded strings directly, so no further
   verification is required here. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Tunable-cost reporters.  The stored salt is the "$7$" setting string:
   byte 3 encodes log2(N) as one base-64 digit, bytes 4..8 encode r and
   bytes 9..13 encode p (30-bit little-endian base-64, 5 chars each).
   Each returns 0 if the setting does not parse. */
static unsigned int tunable_cost_N(void *salt)
{
const uint8_t * setting;
const uint8_t * src;
uint64_t N;
setting = salt;
if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$')
return 0;
src = setting + 3;
{
uint32_t N_log2;
/* decode64_one is non-zero on an invalid digit */
if (decode64_one(&N_log2, *src))
return 0;
src++;
N = (uint64_t)1 << N_log2;
}
return (unsigned int) N;
}
static unsigned int tunable_cost_r(void *salt)
{
const uint8_t * setting;
const uint8_t * src;
uint32_t r;
setting = salt;
if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$')
return 0;
src = setting + 3;
{
uint32_t N_log2;
/* skip over the N digit, still validating it */
if (decode64_one(&N_log2, *src))
return 0;
src++;
}
src = decode64_uint32(&r, 30, src);
if (!src)
return 0;
return (unsigned int) r;
}
static unsigned int tunable_cost_p(void *salt)
{
const uint8_t * setting;
const uint8_t * src;
uint32_t r, p;
setting = salt;
if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$')
return 0;
src = setting + 3;
{
uint32_t N_log2;
/* skip over the N digit, still validating it */
if (decode64_one(&N_log2, *src))
return 0;
src++;
}
/* skip over r to reach p */
src = decode64_uint32(&r, 30, src);
if (!src)
return 0;
src = decode64_uint32(&p, 30, src);
if (!src)
return 0;
return (unsigned int) p;
}
/* Format registration: parameters block followed by the method table. */
struct fmt_main fmt_scrypt = {
{
/* format parameters */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
/* tunable cost names, paired with the tunable_cost_* methods below */
{
"N",
"r",
"p"
},
/* signature tags recognized by prepare()/valid() */
{ FMT_TAG7, FMT_CISCO9, FMT_SCRYPTKDF },
tests
}, {
/* method table */
init,
done,
fmt_default_reset,
prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
tunable_cost_N,
tunable_cost_r,
tunable_cost_p
},
fmt_default_source,
/* binary_hash table; NULL entries mark unsupported larger sizes */
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
NULL,
NULL
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
/* get_hash table must mirror the binary_hash table above */
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
NULL,
NULL
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
GB_unaryop__ainv_uint32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_bool
// op(A') function: GB_tran__ainv_uint32_bool
// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = -((uint32_t) Ax [p]) for all anz entries, per the GB_CAST_OP
   macro above; each iteration is independent, so the loop is a flat
   statically-scheduled parallel for.  (Auto-generated file: regenerate
   rather than hand-edit.) */
GrB_Info GB_unop__ainv_uint32_bool
(
uint32_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): the actual transpose loop lives in the shared
   template GB_unaryop_transpose.c, specialized here by the GB_* macros
   defined above.  (Auto-generated file: regenerate rather than
   hand-edit.) */
GrB_Info GB_tran__ainv_uint32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Main.c | #include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
int main( int argc, char* argv[] )
{
// =====================================================================
// Initialization & Command Line Read-In
// =====================================================================
int version = 19;
int mype = 0;
double omp_start, omp_end;
int nprocs = 1;
// accumulated verification hash from the simulation kernel
unsigned long long verification;
#ifdef MPI
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#endif
// Process CLI Fields -- store in "Inputs" structure
Inputs in = read_CLI( argc, argv );
// Set number of OpenMP Threads
#ifdef OPENMP
omp_set_num_threads(in.nthreads);
#endif
// Print-out of Input Summary
if( mype == 0 )
print_inputs( in, nprocs, version );
// =====================================================================
// Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data
// This is not reflective of a real Monte Carlo simulation workload,
// therefore, do not profile this region!
// =====================================================================
SimulationData SD;
// If read from file mode is selected, skip initialization and load
// all simulation data structures from file instead
if( in.binary_mode == READ )
SD = binary_read(in);
else
SD = grid_init_do_not_profile( in, mype );
// If writing from file mode is selected, write all simulation data
// structures to file
if( in.binary_mode == WRITE && mype == 0 )
binary_write(in, SD);
// =====================================================================
// Cross Section (XS) Parallel Lookup Simulation
// This is the section that should be profiled, as it reflects a
// realistic continuous energy Monte Carlo macroscopic cross section
// lookup kernel.
// =====================================================================
if( mype == 0 )
{
printf("\n");
border_print();
center_print("SIMULATION", 79);
border_print();
}
// Start Simulation Timer
omp_start = get_time();
// Run simulation
if( in.simulation_method == EVENT_BASED )
{
// kernel_id selects between the baseline and optimized event kernels
if( in.kernel_id == 0 )
verification = run_event_based_simulation(in, SD, mype);
else if( in.kernel_id == 1 )
verification = run_event_based_simulation_optimization_1(in, SD, mype);
else
{
printf("Error: No kernel ID %d found!\n", in.kernel_id);
exit(1);
}
}
else
verification = run_history_based_simulation(in, SD, mype);
if( mype == 0)
{
printf("\n" );
printf("Simulation complete.\n" );
}
// End Simulation Timer
omp_end = get_time();
// =====================================================================
// Output Results & Finalize
// =====================================================================
// Final Hash Step
// reduce the accumulated hash modulo a fixed constant so it can be
// compared against known-good checksum values
verification = verification % 999983;
// Print / Save Results and Exit
// nonzero presumably signals a verification mismatch, making it a
// useful process exit code -- confirm in print_results()
int is_invalid_result = print_results( in, mype, omp_end-omp_start, nprocs, verification );
#ifdef MPI
MPI_Finalize();
#endif
return is_invalid_result;
}
//io.c
// Prints the XSBench ASCII-art logo banner, attribution line, and
// version number, framed top and bottom by border lines.
// version - integer version number printed beneath the logo
void logo(int version)
{
	border_print();
	printf(
	" __ __ ___________ _ \n"
	" \\ \\ / // ___| ___ \\ | | \n"
	" \\ V / \\ `--.| |_/ / ___ _ __ ___| |__ \n"
	" / \\ `--. \\ ___ \\/ _ \\ '_ \\ / __| '_ \\ \n"
	" / /^\\ \\/\\__/ / |_/ / __/ | | | (__| | | | \n"
	" \\/ \\/\\____/\\____/ \\___|_| |_|\\___|_| |_| \n\n"
	);
	border_print();
	center_print("Developed at Argonne National Laboratory", 79);
	// Format the version number into a small stack buffer so it can be
	// centered like the other banner lines.
	char v[100];
	sprintf(v, "Version: %d", version);
	center_print(v, 79);
	border_print();
}
// Prints string s centered (approximately) within a terminal of the
// given character width, followed by a newline. When s is wider than
// width, no padding is emitted and the text prints flush-left.
void center_print(const char *s, int width)
{
	// Left padding is half the slack, plus one (matches the historical
	// output of this function exactly).
	int pad = (width - (int) strlen(s)) / 2;
	for (int i = 0; i <= pad; i++)
		putchar(' ');
	puts(s);
}
// Prints (on rank 0) the RESULTS block -- thread/rank counts, runtime,
// and lookup throughput -- plus the verification checksum, and returns
// 0 if the checksum matches the known-good reference value for the
// selected benchmark size and simulation method, or 1 otherwise.
//
// in      - runtime configuration (simulation method, HM size, etc.)
// mype    - MPI rank (only rank 0 prints)
// runtime - wall-clock seconds of the simulation region
// nprocs  - number of MPI ranks (used only when compiled with MPI)
// vhash   - verification hash accumulated by the simulation kernels
int print_results( Inputs in, int mype, double runtime, int nprocs,
	unsigned long long vhash )
{
	// Calculate Lookups per sec.
	// Computed in 64 bits: for history-based runs the product
	// lookups * particles can overflow a 32-bit int on large runs.
	long lookups = 0;
	if( in.simulation_method == HISTORY_BASED )
		lookups = (long) in.lookups * in.particles;
	else if( in.simulation_method == EVENT_BASED )
		lookups = in.lookups;
	int lookups_per_sec = (int) ((double) lookups / runtime);
	// If running in MPI, reduce timing statistics and calculate average
	#ifdef MPI
	int total_lookups = 0;
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Reduce(&lookups_per_sec, &total_lookups, 1, MPI_INT,
	MPI_SUM, 0, MPI_COMM_WORLD);
	#endif
	// Assume invalid until the checksum matches a reference value below.
	int is_invalid_result = 1;
	// Print output
	if( mype == 0 )
	{
		border_print();
		center_print("RESULTS", 79);
		border_print();
		// Print the results
		printf("Threads: %d\n", in.nthreads);
		#ifdef MPI
		printf("MPI ranks: %d\n", nprocs);
		#endif
		#ifdef MPI
		printf("Total Lookups/s: ");
		fancy_int(total_lookups);
		printf("Avg Lookups/s per MPI rank: ");
		fancy_int(total_lookups / nprocs);
		#else
		printf("Runtime: %.3lf seconds\n", runtime);
		printf("Lookups: "); fancy_int(lookups);
		printf("Lookups/s: ");
		fancy_int(lookups_per_sec);
		#endif
	}
	// Reference checksums for the two verifiable H-M problem sizes.
	// (XL and XXL runs have no reference value and always report invalid.)
	unsigned long long large = 0;
	unsigned long long small = 0;
	if( in.simulation_method == EVENT_BASED )
	{
		small = 945990;
		large = 952131;
	}
	else if( in.simulation_method == HISTORY_BASED )
	{
		small = 941535;
		large = 954318;
	}
	if( strcmp(in.HM, "large") == 0 )
	{
		if( vhash == large )
			is_invalid_result = 0;
	}
	else if( strcmp(in.HM, "small") == 0 )
	{
		if( vhash == small )
			is_invalid_result = 0;
	}
	if(mype == 0 )
	{
		// (Fixed spelling of the warning message: "INVALID".)
		if( is_invalid_result )
			printf("Verification checksum: %llu (WARNING - INVALID CHECKSUM!)\n", vhash);
		else
			printf("Verification checksum: %llu (Valid)\n", vhash);
		border_print();
	}
	return is_invalid_result;
}
// Prints the INPUT SUMMARY banner: the logo, then one line per
// configuration setting, then the initialization-section header.
// in      - parsed runtime configuration
// nprocs  - number of MPI ranks (printed only under MPI)
// version - program version forwarded to logo()
void print_inputs(Inputs in, int nprocs, int version )
{
	// Estimate memory usage up front so it can be reported below.
	int mem_estimate = estimate_mem_usage( in );
	logo(version);
	center_print("INPUT SUMMARY", 79);
	border_print();
	printf(in.simulation_method == EVENT_BASED
		? "Simulation Method: Event Based\n"
		: "Simulation Method: History Based\n");
	switch( in.grid_type )
	{
		case NUCLIDE:
			printf("Grid Type: Nuclide Grid\n");
			break;
		case UNIONIZED:
			printf("Grid Type: Unionized Grid\n");
			break;
		default:
			printf("Grid Type: Hash\n");
			break;
	}
	printf("Materials: %d\n", 12);
	printf("H-M Benchmark Size: %s\n", in.HM);
	printf("Total Nuclides: %ld\n", in.n_isotopes);
	printf("Gridpoints (per Nuclide): ");
	fancy_int(in.n_gridpoints);
	if( in.grid_type == HASH )
	{
		printf("Hash Bins: ");
		fancy_int(in.hash_bins);
	}
	if( in.grid_type == UNIONIZED )
	{
		printf("Unionized Energy Gridpoints: ");
		fancy_int(in.n_isotopes*in.n_gridpoints);
	}
	if( in.simulation_method == HISTORY_BASED )
	{
		printf("Particle Histories: "); fancy_int(in.particles);
		printf("XS Lookups per Particle: "); fancy_int(in.lookups);
	}
	printf("Total XS Lookups: "); fancy_int(in.lookups);
	#ifdef MPI
	printf("MPI Ranks: %d\n", nprocs);
	printf("OMP Threads per MPI Rank: %d\n", in.nthreads);
	printf("Mem Usage per MPI Rank (MB): "); fancy_int(mem_estimate);
	#else
	printf("Threads: %d\n", in.nthreads);
	printf("Est. Memory Usage (MB): "); fancy_int(mem_estimate);
	#endif
	printf("Binary File Mode: ");
	switch( in.binary_mode )
	{
		case NONE:
			printf("Off\n");
			break;
		case READ:
			printf("Read\n");
			break;
		default:
			printf("Write\n");
			break;
	}
	border_print();
	center_print("INITIALIZATION - DO NOT PROFILE", 79);
	border_print();
}
// Emits a full-width horizontal rule (a line of '=' characters) used
// to frame banner sections.
void border_print(void)
{
	fputs(
	"==================================================================="
	"=============\n", stdout);
}
// Prints a long integer with comma thousands separators followed by a
// newline, e.g. 1234567 -> "1,234,567". Negative values and values
// below 1000 print without separators.
//
// Cleanup vs. the original: each else-if range check repeated the
// lower bound already guaranteed by the preceding branch, and the
// final else was unreachable (the four branches cover all longs up to
// ~10^12; larger supported values would need one more group).
void fancy_int( long a )
{
	if( a < 1000 )
		printf("%ld\n",a);
	else if( a < 1000000 )
		printf("%ld,%03ld\n", a / 1000, a % 1000);
	else if( a < 1000000000 )
		printf("%ld,%03ld,%03ld\n",a / 1000000,(a % 1000000) / 1000,a % 1000 );
	else
		printf("%ld,%03ld,%03ld,%03ld\n",
		a / 1000000000,
		(a % 1000000000) / 1000000,
		(a % 1000000) / 1000,
		a % 1000 );
}
// Prints the command-line usage/help text to stdout and terminates the
// program with exit code 4. Called on any malformed argument.
void print_CLI_error(void)
{
	static const char * usage[] = {
		"Usage: ./XSBench <options>\n",
		"Options include:\n",
		" -m <simulation method> Simulation method (history, event)\n",
		" -t <threads> Number of OpenMP threads to run\n",
		" -s <size> Size of H-M Benchmark to run (small, large, XL, XXL)\n",
		" -g <gridpoints> Number of gridpoints per nuclide (overrides -s defaults)\n",
		" -G <grid type> Grid search type (unionized, nuclide, hash). Defaults to unionized.\n",
		" -p <particles> Number of particle histories\n",
		" -l <lookups> History Based: Number of Cross-section (XS) lookups per particle. Event Based: Total number of XS lookups.\n",
		" -h <hash bins> Number of hash bins (only relevant when used with \"-G hash\")\n",
		" -b <binary mode> Read or write all data structures to file. If reading, this will skip initialization phase. (read, write)\n",
		" -k <kernel ID> Specifies which kernel to run. 0 is baseline, 1, 2, etc are optimized variants. (0 is default.)\n",
		"Default is equivalent to: -m history -s large -l 34 -p 500000 -G unionized\n",
		"See readme for full description of default run values\n",
	};
	for( size_t i = 0; i < sizeof(usage) / sizeof(usage[0]); i++ )
		fputs(usage[i], stdout);
	exit(4);
}
// Parses the command line into an Inputs struct. Unspecified options
// keep their defaults; any malformed or out-of-range value triggers
// print_CLI_error(), which prints usage and exits.
//
// argc/argv - as received by main()
// Returns a fully validated Inputs struct.
Inputs read_CLI( int argc, char * argv[] )
{
	Inputs input;
	// defaults to the history based simulation method
	input.simulation_method = HISTORY_BASED;
	// defaults to max threads on the system
	#ifdef OPENMP
	input.nthreads = omp_get_num_procs();
	#else
	input.nthreads = 1;
	#endif
	// defaults to 355 (corresponding to H-M Large benchmark)
	input.n_isotopes = 355;
	// defaults to 11303 (corresponding to H-M Large benchmark)
	input.n_gridpoints = 11303;
	// defaults to 500,000
	input.particles = 500000;
	// defaults to 34
	input.lookups = 34;
	// default to unionized grid
	input.grid_type = UNIONIZED;
	// default number of hash bins (only used with "-G hash")
	input.hash_bins = 10000;
	// default to no binary read/write
	input.binary_mode = NONE;
	// defaults to baseline kernel
	input.kernel_id = 0;
	// defaults to H-M Large benchmark
	input.HM = (char *) malloc( 6 * sizeof(char) );
	strcpy(input.HM, "large");
	// Tracks whether input.HM still points at the heap-allocated
	// default, so -s can free it exactly once (fixes a small leak when
	// the user overrides the benchmark size).
	int hm_is_heap = 1;
	// Check if user sets these
	int user_g = 0;
	int default_lookups = 1;
	int default_particles = 1;
	// Collect Raw Input
	for( int i = 1; i < argc; i++ )
	{
		char * arg = argv[i];
		// nthreads (-t)
		if( strcmp(arg, "-t") == 0 )
		{
			if( ++i < argc )
				input.nthreads = atoi(argv[i]);
			else
				print_CLI_error();
		}
		// n_gridpoints (-g)
		else if( strcmp(arg, "-g") == 0 )
		{
			if( ++i < argc )
			{
				user_g = 1;
				input.n_gridpoints = atol(argv[i]);
			}
			else
				print_CLI_error();
		}
		// Simulation Method (-m)
		else if( strcmp(arg, "-m") == 0 )
		{
			char * sim_type;
			if( ++i < argc )
				sim_type = argv[i];
			else
				print_CLI_error();
			if( strcmp(sim_type, "history") == 0 )
				input.simulation_method = HISTORY_BASED;
			else if( strcmp(sim_type, "event") == 0 )
			{
				input.simulation_method = EVENT_BASED;
				// Also resets default # of lookups: event mode
				// interprets -l as the TOTAL lookup count, so fold the
				// per-particle default in unless the user set either.
				if( default_lookups && default_particles )
				{
					input.lookups = input.lookups * input.particles;
					input.particles = 0;
				}
			}
			else
				print_CLI_error();
		}
		// lookups (-l)
		else if( strcmp(arg, "-l") == 0 )
		{
			if( ++i < argc )
			{
				input.lookups = atoi(argv[i]);
				default_lookups = 0;
			}
			else
				print_CLI_error();
		}
		// hash bins (-h)
		else if( strcmp(arg, "-h") == 0 )
		{
			if( ++i < argc )
				input.hash_bins = atoi(argv[i]);
			else
				print_CLI_error();
		}
		// particles (-p)
		else if( strcmp(arg, "-p") == 0 )
		{
			if( ++i < argc )
			{
				input.particles = atoi(argv[i]);
				default_particles = 0;
			}
			else
				print_CLI_error();
		}
		// HM (-s)
		else if( strcmp(arg, "-s") == 0 )
		{
			if( ++i < argc )
			{
				// Release the heap-allocated default before pointing
				// at argv storage (argv outlives this struct).
				if( hm_is_heap )
				{
					free(input.HM);
					hm_is_heap = 0;
				}
				input.HM = argv[i];
			}
			else
				print_CLI_error();
		}
		// grid type (-G)
		else if( strcmp(arg, "-G") == 0 )
		{
			char * grid_type;
			if( ++i < argc )
				grid_type = argv[i];
			else
				print_CLI_error();
			if( strcmp(grid_type, "unionized") == 0 )
				input.grid_type = UNIONIZED;
			else if( strcmp(grid_type, "nuclide") == 0 )
				input.grid_type = NUCLIDE;
			else if( strcmp(grid_type, "hash") == 0 )
				input.grid_type = HASH;
			else
				print_CLI_error();
		}
		// binary mode (-b)
		else if( strcmp(arg, "-b") == 0 )
		{
			char * binary_mode;
			if( ++i < argc )
				binary_mode = argv[i];
			else
				print_CLI_error();
			if( strcmp(binary_mode, "read") == 0 )
				input.binary_mode = READ;
			else if( strcmp(binary_mode, "write") == 0 )
				input.binary_mode = WRITE;
			else
				print_CLI_error();
		}
		// kernel optimization selection (-k)
		else if( strcmp(arg, "-k") == 0 )
		{
			if( ++i < argc )
			{
				input.kernel_id = atoi(argv[i]);
			}
			else
				print_CLI_error();
		}
		else
			print_CLI_error();
	}
	// Validate Input
	// Validate nthreads
	if( input.nthreads < 1 )
		print_CLI_error();
	// Validate n_isotopes
	if( input.n_isotopes < 1 )
		print_CLI_error();
	// Validate n_gridpoints
	if( input.n_gridpoints < 1 )
		print_CLI_error();
	// Validate lookups
	if( input.lookups < 1 )
		print_CLI_error();
	// Validate Hash Bins
	if( input.hash_bins < 1 )
		print_CLI_error();
	// Validate HM size
	if( strcasecmp(input.HM, "small") != 0 &&
		strcasecmp(input.HM, "large") != 0 &&
		strcasecmp(input.HM, "XL") != 0 &&
		strcasecmp(input.HM, "XXL") != 0 )
		print_CLI_error();
	// Set HM size specific parameters
	// (defaults to large)
	if( strcasecmp(input.HM, "small") == 0 )
		input.n_isotopes = 68;
	else if( strcasecmp(input.HM, "XL") == 0 && user_g == 0 )
		input.n_gridpoints = 238847; // sized to make 120 GB XS data
	else if( strcasecmp(input.HM, "XXL") == 0 && user_g == 0 )
		input.n_gridpoints = 238847 * 2.1; // 252 GB XS data
	// Return input struct
	return input;
}
// Serializes the SimulationData object to "XS_data.dat": the header
// struct first (its embedded pointers are written as-is and discarded
// on read), then each heap array in a fixed order that binary_read
// mirrors exactly.
void binary_write( Inputs in, SimulationData SD )
{
	const char * fname = "XS_data.dat";
	printf("Writing all data structures to binary file %s...\n", fname);
	// "wb": the payload is raw binary, so the stream must not perform
	// text-mode newline translation (matters on Windows); also check
	// the open succeeded, mirroring binary_read.
	FILE * fp = fopen(fname, "wb");
	assert(fp != NULL);
	// Write SimulationData Object. Include pointers, even though we won't be using them.
	fwrite(&SD, sizeof(SimulationData), 1, fp);
	// Write heap arrays in SimulationData Object
	fwrite(SD.num_nucs, sizeof(int), SD.length_num_nucs, fp);
	fwrite(SD.concs, sizeof(double), SD.length_concs, fp);
	fwrite(SD.mats, sizeof(int), SD.length_mats, fp);
	fwrite(SD.nuclide_grid, sizeof(NuclideGridPoint), SD.length_nuclide_grid, fp);
	fwrite(SD.index_grid, sizeof(int), SD.length_index_grid, fp);
	fwrite(SD.unionized_energy_array, sizeof(double), SD.length_unionized_energy_array, fp);
	fclose(fp);
}
// Deserializes a SimulationData object from "XS_data.dat" as written
// by binary_write: reads the header struct, reallocates every heap
// array from the length fields it carries, then reads the arrays in
// the same fixed order. Each fread is checked so a truncated or
// mismatched file fails loudly instead of yielding garbage data.
SimulationData binary_read( Inputs in )
{
	SimulationData SD;
	const char * fname = "XS_data.dat";
	printf("Reading all data structures from binary file %s...\n", fname);
	// "rb": must match the binary ("wb") mode used by binary_write.
	FILE * fp = fopen(fname, "rb");
	assert(fp != NULL);
	// Read SimulationData Object. Include pointers, even though we won't be using them.
	size_t nread = fread(&SD, sizeof(SimulationData), 1, fp);
	assert(nread == 1);
	// Allocate space for arrays on heap
	SD.num_nucs = (int *) malloc(SD.length_num_nucs * sizeof(int));
	SD.concs = (double *) malloc(SD.length_concs * sizeof(double));
	SD.mats = (int *) malloc(SD.length_mats * sizeof(int));
	SD.nuclide_grid = (NuclideGridPoint *) malloc(SD.length_nuclide_grid * sizeof(NuclideGridPoint));
	SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int));
	SD.unionized_energy_array = (double *) malloc( SD.length_unionized_energy_array * sizeof(double));
	// Read heap arrays into SimulationData Object, verifying that each
	// read returns the full element count requested.
	nread = fread(SD.num_nucs, sizeof(int), SD.length_num_nucs, fp);
	assert(nread == (size_t) SD.length_num_nucs);
	nread = fread(SD.concs, sizeof(double), SD.length_concs, fp);
	assert(nread == (size_t) SD.length_concs);
	nread = fread(SD.mats, sizeof(int), SD.length_mats, fp);
	assert(nread == (size_t) SD.length_mats);
	nread = fread(SD.nuclide_grid, sizeof(NuclideGridPoint), SD.length_nuclide_grid, fp);
	assert(nread == (size_t) SD.length_nuclide_grid);
	nread = fread(SD.index_grid, sizeof(int), SD.length_index_grid, fp);
	assert(nread == (size_t) SD.length_index_grid);
	nread = fread(SD.unionized_energy_array, sizeof(double), SD.length_unionized_energy_array, fp);
	assert(nread == (size_t) SD.length_unionized_energy_array);
	fclose(fp);
	return SD;
}
//Simulation.c
////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// implementation of the algorithm, with only minor CPU optimizations in place.
// Following these functions are a number of optimized variants,
// which each deploy a different combination of optimizations strategies. By
// default, XSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
// Baseline event-based transport kernel: performs in.lookups fully
// independent macroscopic XS lookups in parallel and returns the
// accumulated verification value (sum over lookups of 1 + index of the
// max reaction channel). Each iteration derives its RNG state by fast-
// forwarding a fixed seed, so results are independent of thread count
// and schedule.
//
// in   - runtime configuration (lookup count, grid type, etc.)
// SD   - initialized simulation data arrays (see manifest below)
// mype - MPI rank; only rank 0 prints
unsigned long long run_event_based_simulation(Inputs in, SimulationData SD, int mype)
{
	if( mype == 0)
		printf("Beginning event based simulation...\n");
	////////////////////////////////////////////////////////////////////////////////
	// SUMMARY: Simulation Data Structure Manifest for "SD" Object
	// Here we list all heap arrays (and lengths) in SD that would need to be
	// offloaded manually if using an accelerator with a separate memory space
	////////////////////////////////////////////////////////////////////////////////
	// int * num_nucs; // Length = length_num_nucs;
	// double * concs; // Length = length_concs
	// int * mats; // Length = length_mats
	// double * unionized_energy_array; // Length = length_unionized_energy_array
	// int * index_grid; // Length = length_index_grid
	// NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid
	//
	// Note: "unionized_energy_array" and "index_grid" can be of zero length
	// depending on lookup method.
	//
	// Note: "Lengths" are given as the number of objects in the array, not the
	// number of bytes.
	////////////////////////////////////////////////////////////////////////////////
	////////////////////////////////////////////////////////////////////////////////
	// Begin Actual Simulation Loop
	////////////////////////////////////////////////////////////////////////////////
	unsigned long long verification = 0;
	// Dynamic scheduling in chunks of 100 balances variable per-lookup
	// cost (hash/nuclide searches differ in depth) across threads.
	#pragma omp parallel for schedule(dynamic,100) reduction(+:verification)
	for( int i = 0; i < in.lookups; i++ )
	{
		// Set the initial seed value
		uint64_t seed = STARTING_SEED;
		// Forward seed to lookup index (we need 2 samples per lookup)
		seed = fast_forward_LCG(seed, 2*i);
		// Randomly pick an energy and material for the particle
		double p_energy = LCG_random_double(&seed);
		int mat = pick_mat(&seed);
		double macro_xs_vector[5] = {0};
		// Perform macroscopic Cross Section Lookup
		calculate_macro_xs(
			p_energy, // Sampled neutron energy (in lethargy)
			mat, // Sampled material type index neutron is in
			in.n_isotopes, // Total number of isotopes in simulation
			in.n_gridpoints, // Number of gridpoints per isotope in simulation
			SD.num_nucs, // 1-D array with number of nuclides per material
			SD.concs, // Flattened 2-D array with concentration of each nuclide in each material
			SD.unionized_energy_array, // 1-D Unionized energy array
			SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
			SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
			SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material
			macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
			in.grid_type, // Lookup type (nuclide, hash, or unionized)
			in.hash_bins, // Number of hash bins used (if using hash lookup type)
			SD.max_num_nucs // Maximum number of nuclides present in any material
		);
		// For verification, and to prevent the compiler from optimizing
		// all work out, we interrogate the returned macro_xs_vector array
		// to find its maximum value index, then increment the verification
		// value by that index. In this implementation, we prevent thread
		// contention by using an OMP reduction on the verification value.
		// For accelerators, a different approach might be required
		// (e.g., atomics, reduction of thread-specific values in large
		// array via CUDA thrust, etc).
		double max = -1.0;
		int max_idx = 0;
		for(int j = 0; j < 5; j++ )
		{
			if( macro_xs_vector[j] > max )
			{
				max = macro_xs_vector[j];
				max_idx = j;
			}
		}
		verification += max_idx+1;
	}
	return verification;
}
// Baseline history-based transport kernel: particles are independent
// (parallelized), but the in.lookups XS lookups WITHIN a particle form
// a dependent chain -- each lookup's result perturbs the RNG state used
// to sample the next energy/material. Returns the accumulated
// verification value (sum of 1 + index of max reaction channel over
// every lookup).
//
// in   - runtime configuration (particle count, lookups per particle)
// SD   - initialized simulation data arrays (see manifest below)
// mype - MPI rank; only rank 0 prints
unsigned long long run_history_based_simulation(Inputs in, SimulationData SD, int mype)
{
	if( mype == 0)
		printf("Beginning history based simulation...\n");
	////////////////////////////////////////////////////////////////////////////////
	// SUMMARY: Simulation Data Structure Manifest for "SD" Object
	// Here we list all heap arrays (and lengths) in SD that would need to be
	// offloaded manually if using an accelerator with a separate memory space
	////////////////////////////////////////////////////////////////////////////////
	// int * num_nucs; // Length = length_num_nucs;
	// double * concs; // Length = length_concs
	// int * mats; // Length = length_mats
	// double * unionized_energy_array; // Length = length_unionized_energy_array
	// int * index_grid; // Length = length_index_grid
	// NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid
	//
	// Note: "unionized_energy_array" and "index_grid" can be of zero length
	// depending on lookup method.
	//
	// Note: "Lengths" are given as the number of objects in the array, not the
	// number of bytes.
	////////////////////////////////////////////////////////////////////////////////
	unsigned long long verification = 0;
	// Begin outer lookup loop over particles. This loop is independent.
	#pragma omp parallel for schedule(dynamic, 100) reduction(+:verification)
	for( int p = 0; p < in.particles; p++ )
	{
		// Set the initial seed value
		uint64_t seed = STARTING_SEED;
		// Forward seed to lookup index (we need 2 samples per lookup, and
		// we may fast forward up to 5 times after each lookup)
		seed = fast_forward_LCG(seed, p*in.lookups*2*5);
		// Randomly pick an energy and material for the particle
		double p_energy = LCG_random_double(&seed);
		int mat = pick_mat(&seed);
		// Inner XS Lookup Loop
		// This loop is dependent!
		// i.e., Next iteration uses data computed in previous iter.
		for( int i = 0; i < in.lookups; i++ )
		{
			double macro_xs_vector[5] = {0};
			// Perform macroscopic Cross Section Lookup
			calculate_macro_xs(
				p_energy, // Sampled neutron energy (in lethargy)
				mat, // Sampled material type neutron is in
				in.n_isotopes, // Total number of isotopes in simulation
				in.n_gridpoints, // Number of gridpoints per isotope in simulation
				SD.num_nucs, // 1-D array with number of nuclides per material
				SD.concs, // Flattened 2-D array with concentration of each nuclide in each material
				SD.unionized_energy_array, // 1-D Unionized energy array
				SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
				SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
				SD.mats, // Flattened 2-D array with nuclide indices for each type of material
				macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
				in.grid_type, // Lookup type (nuclide, hash, or unionized)
				in.hash_bins, // Number of hash bins used (if using hash lookups)
				SD.max_num_nucs // Maximum number of nuclides present in any material
			);
			// For verification, and to prevent the compiler from optimizing
			// all work out, we interrogate the returned macro_xs_vector array
			// to find its maximum value index, then increment the verification
			// value by that index. In this implementation, we prevent thread
			// contention by using an OMP reduction on it. For other accelerators,
			// a different approach might be required (e.g., atomics, reduction
			// of thread-specific values in large array via CUDA thrust, etc)
			double max = -1.0;
			int max_idx = 0;
			for(int j = 0; j < 5; j++ )
			{
				if( macro_xs_vector[j] > max )
				{
					max = macro_xs_vector[j];
					max_idx = j;
				}
			}
			verification += max_idx+1;
			// Randomly pick next energy and material for the particle
			// Also incorporates results from macro_xs lookup to
			// enforce loop dependency.
			// In a real MC app, this dependency is expressed in terms
			// of branching physics sampling, whereas here we are just
			// artificially enforcing this dependence based on fast
			// forwarding the LCG state
			uint64_t n_forward = 0;
			for( int j = 0; j < 5; j++ )
				if( macro_xs_vector[j] > 1.0 )
					n_forward++;
			if( n_forward > 0 )
				seed = fast_forward_LCG(seed, n_forward);
			p_energy = LCG_random_double(&seed);
			mat = pick_mat(&seed);
		}
	}
	return verification;
}
// Calculates the microscopic cross section for a given nuclide & energy.
// Locates the two nuclide-grid points bracketing p_energy -- by binary
// search (NUCLIDE), by direct index via the unionized grid's
// accelerator table (UNIONIZED), or by a windowed binary search
// bounded by the hash table (HASH) -- then linearly interpolates all
// five reaction channels into xs_vector.
//
// p_energy      - neutron energy to look up
// nuc           - nuclide index into the flattened nuclide_grids
// n_isotopes    - row stride of index_data (nuclides per energy row)
// n_gridpoints  - gridpoints per nuclide (row stride of nuclide_grids)
// egrid         - unionized energy array (unused in this function body;
//                 kept for interface symmetry with callers)
// index_data    - accelerator table: grid/hash row -> nuclide-grid index
// nuclide_grids - flattened [nuclide][gridpoint] energy/XS table
// idx           - precomputed unionized/hash row (ignored for NUCLIDE,
//                 which recomputes it via binary search)
// xs_vector     - out: 5 interpolated XS values
// grid_type     - NUCLIDE, UNIONIZED, or HASH
// hash_bins     - number of hash rows (HASH mode only)
void calculate_micro_xs( double p_energy, int nuc, long n_isotopes,
	long n_gridpoints,
	double * restrict egrid, int * restrict index_data,
	NuclideGridPoint * restrict nuclide_grids,
	long idx, double * restrict xs_vector, int grid_type, int hash_bins ){
	// Variables
	double f;
	NuclideGridPoint * low, * high;
	// If using only the nuclide grid, we must perform a binary search
	// to find the energy location in this particular nuclide's grid.
	if( grid_type == NUCLIDE )
	{
		// Perform binary search on the Nuclide Grid to find the index
		idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1);
		// pull ptr from nuclide grid and check to ensure that
		// we're not reading off the end of the nuclide's grid
		// (step back one so that low+1 below stays in bounds)
		if( idx == n_gridpoints - 1 )
			low = &nuclide_grids[nuc*n_gridpoints + idx - 1];
		else
			low = &nuclide_grids[nuc*n_gridpoints + idx];
	}
	else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed.
	{
		// pull ptr from energy grid and check to ensure that
		// we're not reading off the end of the nuclide's grid
		if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 )
			low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1];
		else
			low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]];
	}
	else // Hash grid
	{
		// load lower bounding index
		int u_low = index_data[idx * n_isotopes + nuc];
		// Determine higher bounding index (next hash row's entry, +1
		// for safety margin; last row clamps to the final gridpoint)
		int u_high;
		if( idx == hash_bins - 1 )
			u_high = n_gridpoints - 1;
		else
			u_high = index_data[(idx+1)*n_isotopes + nuc] + 1;
		// Check edge cases to make sure energy is actually between these
		// Then, if things look good, search for gridpoint in the nuclide grid
		// within the lower and higher limits we've calculated.
		double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy;
		double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy;
		int lower;
		if( p_energy <= e_low )
			lower = 0;
		else if( p_energy >= e_high )
			lower = n_gridpoints - 1;
		else
			lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high);
		if( lower == n_gridpoints - 1 )
			low = &nuclide_grids[nuc*n_gridpoints + lower - 1];
		else
			low = &nuclide_grids[nuc*n_gridpoints + lower];
	}
	high = low + 1;
	// calculate the re-useable interpolation factor
	f = (high->energy - p_energy) / (high->energy - low->energy);
	// Total XS
	xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs);
	// Elastic XS
	xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs);
	// Absorption XS (struct field is spelled "absorbtion")
	xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs);
	// Fission XS
	xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs);
	// Nu Fission XS
	xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs);
}
// Calculates the macroscopic cross section for a given material &
// energy: a concentration-weighted sum of microscopic XS over every
// nuclide present in the material. Results for the 5 reaction channels
// are written into macro_xs_vector (which is zeroed first).
//
// p_energy / mat      - sampled neutron energy and material index
// n_isotopes          - total isotopes (row stride of index_data)
// n_gridpoints        - gridpoints per nuclide
// num_nucs            - nuclides per material
// concs               - flattened [material][nuclide] concentrations
// egrid / index_data  - unionized energy grid and accelerator table
// nuclide_grids       - flattened nuclide energy/XS table
// mats                - flattened [material][slot] -> nuclide index
// macro_xs_vector     - out: 5 accumulated macroscopic XS values
// grid_type/hash_bins - lookup mode and hash-table size
// max_num_nucs        - row stride of mats/concs
void calculate_macro_xs( double p_energy, int mat, long n_isotopes,
	long n_gridpoints, int * restrict num_nucs,
	double * restrict concs,
	double * restrict egrid, int * restrict index_data,
	NuclideGridPoint * restrict nuclide_grids,
	int * restrict mats,
	double * restrict macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){
	int p_nuc; // the nuclide we are looking up
	long idx = -1;
	double conc; // the concentration of the nuclide in the material
	// cleans out macro_xs_vector
	for( int k = 0; k < 5; k++ )
		macro_xs_vector[k] = 0;
	// If we are using the unionized energy grid (UEG), we only
	// need to perform 1 binary search per macroscopic lookup.
	// If we are using the nuclide grid search, it will have to be
	// done inside of the "calculate_micro_xs" function for each different
	// nuclide in the material.
	if( grid_type == UNIONIZED )
		idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid);
	else if( grid_type == HASH )
	{
		// Hash mode: the bin index is just the energy scaled into
		// [0, hash_bins) -- no search required here.
		double du = 1.0 / hash_bins;
		idx = p_energy / du;
	}
	// Once we find the pointer array on the UEG, we can pull the data
	// from the respective nuclide grids, as well as the nuclide
	// concentration data for the material
	// Each nuclide from the material needs to have its micro-XS array
	// looked up & interpolated (via calculate_micro_xs). Then, the
	// micro XS is multiplied by the concentration of that nuclide
	// in the material, and added to the total macro XS array.
	// (Independent -- though if parallelizing, must use atomic operations
	// or otherwise control access to the xs_vector and macro_xs_vector to
	// avoid simultaneous writing to the same data structure)
	for( int j = 0; j < num_nucs[mat]; j++ )
	{
		double xs_vector[5];
		p_nuc = mats[mat*max_num_nucs + j];
		conc = concs[mat*max_num_nucs + j];
		calculate_micro_xs( p_energy, p_nuc, n_isotopes,
			n_gridpoints, egrid, index_data,
			nuclide_grids, idx, xs_vector, grid_type, hash_bins );
		for( int k = 0; k < 5; k++ )
			macro_xs_vector[k] += xs_vector[k] * conc;
	}
}
// Binary search for an energy value on the unionized energy grid.
// Maintains the invariant A[lo] <= quarry < A[hi] (assuming quarry is
// in range) and returns the lower bracketing index. Values outside the
// grid clamp to 0 or n-2 respectively.
long grid_search( long n, double quarry, double * restrict A)
{
	long lo = 0;
	long hi = n - 1;
	while( hi - lo > 1 )
	{
		long mid = lo + ( (hi - lo) / 2 );
		if( A[mid] > quarry )
			hi = mid;
		else
			lo = mid;
	}
	return lo;
}
// Binary search for an energy value on a single nuclide's energy grid,
// restricted to the index window [low, high]. Returns the index whose
// energy lower-bounds quarry within that window.
// (n is unused; kept for interface symmetry with grid_search.)
long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high)
{
	long lo = low;
	long hi = high;
	while( hi - lo > 1 )
	{
		long mid = lo + ( (hi - lo) / 2 );
		if( A[mid].energy > quarry )
			hi = mid;
		else
			lo = mid;
	}
	return lo;
}
// Picks a material index [0, 11] from a fixed probabilistic
// distribution of core volume fractions, consuming one sample from the
// caller's LCG stream.
int pick_mat( uint64_t * seed )
{
	// Volume fractions (by material) of the reactor core. Not a
	// *perfect* model of where XS lookups occur, but it biases the
	// lookup mix realistically.
	double dist[12];
	dist[0] = 0.140; // fuel
	dist[1] = 0.052; // cladding
	dist[2] = 0.275; // cold, borated water
	dist[3] = 0.134; // hot, borated water
	dist[4] = 0.154; // RPV
	dist[5] = 0.064; // Lower, radial reflector
	dist[6] = 0.066; // Upper reflector / top plate
	dist[7] = 0.055; // bottom plate
	dist[8] = 0.008; // bottom nozzle
	dist[9] = 0.015; // top nozzle
	dist[10] = 0.025; // top of fuel assemblies
	dist[11] = 0.013; // bottom of fuel assemblies
	double roll = LCG_random_double(seed);
	// Accept the first material whose cumulative probability exceeds
	// the roll. NOTE: the cumulative sum is taken over dist[mat..1] in
	// DESCENDING index order, exactly matching the reference
	// implementation -- the floating-point summation order must not
	// change, or the verification checksum changes.
	for( int mat = 0; mat < 12; mat++ )
	{
		double cumulative = 0;
		int j = mat;
		while( j > 0 )
			cumulative += dist[j--];
		if( roll < cumulative )
			return mat;
	}
	return 0;
}
// Advances the 63-bit linear congruential generator held in *seed by
// one step and returns the new state scaled into [0, 1).
double LCG_random_double(uint64_t * seed)
{
	// LCG parameters: modulus 2^63 with a full-period multiplier.
	const uint64_t modulus    = 9223372036854775808ULL; // 2^63
	const uint64_t multiplier = 2806196910506780709ULL;
	const uint64_t increment  = 1ULL;
	*seed = (multiplier * (*seed) + increment) % modulus;
	return (double) (*seed) / (double) modulus;
}
// Returns the LCG state reached after advancing `seed` by n steps, in
// O(log n) time, by repeated squaring of the affine map
// x -> a*x + c (mod 2^63): at each bit of n the running (a, c) pair is
// folded into the accumulated transform when that bit is set.
uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
	const uint64_t m = 9223372036854775808ULL; // 2^63
	uint64_t a_pow = 2806196910506780709ULL; // a^(2^k) at loop step k
	uint64_t c_pow = 1ULL;                   // increment term matching a_pow
	uint64_t a_acc = 1;                      // accumulated multiplier
	uint64_t c_acc = 0;                      // accumulated increment
	for( uint64_t steps = n % m; steps != 0; steps >>= 1 )
	{
		if( steps & 1 )
		{
			a_acc *= a_pow;
			c_acc = c_acc * a_pow + c_pow;
		}
		c_pow *= (a_pow + 1);
		a_pow *= a_pow;
	}
	return (a_acc * seed + c_acc) % m;
}
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
// functions, which each deploy a different combination of optimizations strategies.
// By default, XSBench will not run any of these variants. They
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
//
// As fast parallel sorting will be required for these optimizations, we will
// first define a set of key-value parallel quicksort routines.
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Parallel Quicksort Key-Value Sorting Algorithms
////////////////////////////////////////////////////////////////////////////////////
//
// These algorithms are based on the parallel quicksort implementation by
// Eduard Lopez published at https://github.com/eduardlopez/quicksort-parallel
//
// Eduard's original version was for an integer type quicksort, but I have modified
// it to form two different versions that can sort key-value pairs together without
// having to bundle them into a separate object. Additionally, I have modified the
// optimal chunk sizes and restricted the number of threads for the array sizing
// that XSBench will be using by default.
//
// Eduard's original implementation carries the following license, which applies to
// the following functions only:
//
// void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff)
// void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads)
// void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff)
// void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads)
//
// The MIT License (MIT)
//
// Copyright (c) 2016 Eduard López
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
////////////////////////////////////////////////////////////////////////////////////
// Recursive step of the parallel key/value quicksort for (int key, double
// value) pairs.  Partitions key[left..right] around the middle element,
// swapping value[] entries in lockstep so each pair stays aligned, then
// recurses: serially for spans below `cutoff`, via OpenMP tasks otherwise.
void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff)
{
    int lo = left;
    int hi = right;
    int pivot = key[(left + right) / 2];

    // Hoare-style partition: walk the two cursors toward each other and
    // swap any out-of-place (key, value) pair they find.
    while (lo <= hi)
    {
        while (key[lo] < pivot)
            lo++;
        while (key[hi] > pivot)
            hi--;
        if (lo <= hi)
        {
            int k = key[lo];
            key[lo] = key[hi];
            key[hi] = k;
            double v = value[lo];
            value[lo] = value[hi];
            value[hi] = v;
            lo++;
            hi--;
        }
    }

    if ((right - left) < cutoff)
    {
        // Small span: task-spawn overhead would dominate, recurse directly.
        if (left < hi)
            quickSort_parallel_internal_i_d(key, value, left, hi, cutoff);
        if (lo < right)
            quickSort_parallel_internal_i_d(key, value, lo, right, cutoff);
    }
    else
    {
        // Large span: hand each half to the OpenMP runtime as a task.
        #pragma omp task
        { quickSort_parallel_internal_i_d(key, value, left, hi, cutoff); }
        #pragma omp task
        { quickSort_parallel_internal_i_d(key, value, lo, right, cutoff); }
    }
}
// Parallel quicksort of (int key, double value) pairs, sorted by key.
// Spawns a team of up to 16 threads; a lone thread enters the recursive
// sorter, which then fans work out as OpenMP tasks.
void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads){
    // Spans smaller than this are sorted by plain recursion, not tasks.
    const int cutoff = 10000;
    // For XSBench's default array sizes, more than 16 threads does not help.
    const int nthreads = (numThreads > 16) ? 16 : numThreads;
    #pragma omp parallel num_threads(nthreads)
    {
        #pragma omp single nowait
        {
            quickSort_parallel_i_d_launch_: // no-op label removed; direct call below
            quickSort_parallel_internal_i_d(key, value, 0, lenArray - 1, cutoff);
        }
    }
}
// Recursive step of the parallel key/value quicksort for (double key, int
// value) pairs.  Mirrors the _i_d variant with the key/value types swapped:
// partition around the middle key, keep values in lockstep, recurse serially
// below `cutoff` or via OpenMP tasks above it.
void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff)
{
    int lo = left;
    int hi = right;
    double pivot = key[(left + right) / 2];

    // Hoare-style partition with paired swaps of key[] and value[].
    while (lo <= hi)
    {
        while (key[lo] < pivot)
            lo++;
        while (key[hi] > pivot)
            hi--;
        if (lo <= hi)
        {
            double k = key[lo];
            key[lo] = key[hi];
            key[hi] = k;
            int v = value[lo];
            value[lo] = value[hi];
            value[hi] = v;
            lo++;
            hi--;
        }
    }

    if ((right - left) < cutoff)
    {
        // Small span: recurse directly to avoid task overhead.
        if (left < hi)
            quickSort_parallel_internal_d_i(key, value, left, hi, cutoff);
        if (lo < right)
            quickSort_parallel_internal_d_i(key, value, lo, right, cutoff);
    }
    else
    {
        // Large span: sort each half as an independent OpenMP task.
        #pragma omp task
        { quickSort_parallel_internal_d_i(key, value, left, hi, cutoff); }
        #pragma omp task
        { quickSort_parallel_internal_d_i(key, value, lo, right, cutoff); }
    }
}
// Parallel quicksort of (double key, int value) pairs, sorted by key.
// A team of up to 16 threads is created; one thread starts the recursion
// and the rest pick up the OpenMP tasks it spawns.
void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads){
    // Spans smaller than this are sorted by plain recursion, not tasks.
    const int cutoff = 10000;
    // For XSBench's default array sizes, more than 16 threads does not help.
    const int nthreads = (numThreads > 16) ? 16 : numThreads;
    #pragma omp parallel num_threads(nthreads)
    {
        #pragma omp single nowait
        {
            quickSort_parallel_internal_d_i(key, value, 0, lenArray - 1, cutoff);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 1 -- Event-based Sample/XS Lookup kernel splitting + Sorting
// lookups by material and energy
////////////////////////////////////////////////////////////////////////////////////
// This kernel separates out the sampling and lookup regions of the event-based
// model, and then sorts the lookups by material type and energy. The goal of this
// optimization is to allow for greatly improved cache locality, and XS indices
// loaded from memory may be re-used for multiple lookups.
//
// As efficienct sorting is key for performance, we also must implement an
// efficient key-value parallel sorting algorithm. We also experimented with using
// the C++ version of thrust for these purposes, but found that our own implemtation
// was slightly faster than the thrust library version, so for speed and
// simplicity we will do not add the thrust dependency.
////////////////////////////////////////////////////////////////////////////////////
// Event-based XS-lookup kernel, optimization 1: the sampling and lookup
// phases are split into separate kernels, and the pending lookups are sorted
// first by material and then by energy within each material.  Sorting makes
// neighboring lookups touch neighboring grid data, so cached XS indices are
// re-used across many lookups.
//
// Parameters:
//   in   - simulation inputs (lookup count, thread count, grid type, ...)
//   SD   - simulation data arrays; p_energy_samples / mat_samples are
//          allocated here to hold the sampled (energy, material) pairs
//   mype - rank id; only rank 0 prints progress messages
//
// Returns: the verification checksum accumulated over all lookups.
unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData SD, int mype)
{
    // const: this binds a string literal, which must never be written through
    // (the original mutable binding invites undefined behavior on write).
    const char * optimization_name = "Optimization 1 - Kernel splitting + full material & energy sort";
    if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);
    ////////////////////////////////////////////////////////////////////////////////
    // Allocate Additional Data Structures Needed by Optimized Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Allocating additional data required by optimized kernel...\n");
    size_t sz;
    size_t total_sz = 0;
    double start, stop;
    sz = in.lookups * sizeof(double);
    SD.p_energy_samples = (double *) malloc(sz);
    total_sz += sz;
    SD.length_p_energy_samples = in.lookups;
    sz = in.lookups * sizeof(int);
    SD.mat_samples = (int *) malloc(sz);
    total_sz += sz;
    SD.length_mat_samples = in.lookups;
    // NOTE(review): message says "on GPU" but this path mallocs host memory —
    // wording is inherited from the accelerator variant; confirm before changing.
    if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);
    ////////////////////////////////////////////////////////////////////////////////
    // Begin Actual Simulation
    ////////////////////////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////////////////////////
    // Sample Materials and Energies
    ////////////////////////////////////////////////////////////////////////////////
    #pragma omp parallel for schedule(dynamic, 100)
    for( int i = 0; i < in.lookups; i++ )
    {
        // Set the initial seed value
        uint64_t seed = STARTING_SEED;
        // Forward seed to lookup index (we need 2 samples per lookup), so
        // the sampled stream is identical regardless of thread count.
        seed = fast_forward_LCG(seed, 2*i);
        // Randomly pick an energy and material for the particle
        double p_energy = LCG_random_double(&seed);
        int mat = pick_mat(&seed);
        SD.p_energy_samples[i] = p_energy;
        SD.mat_samples[i] = mat;
    }
    if(mype == 0) printf("finished sampling...\n");
    ////////////////////////////////////////////////////////////////////////////////
    // Sort by Material
    ////////////////////////////////////////////////////////////////////////////////
    start = get_time();
    quickSort_parallel_i_d(SD.mat_samples, SD.p_energy_samples, in.lookups, in.nthreads);
    stop = get_time();
    if(mype == 0) printf("Material sort took %.3lf seconds\n", stop-start);
    ////////////////////////////////////////////////////////////////////////////////
    // Sort by Energy
    ////////////////////////////////////////////////////////////////////////////////
    start = get_time();
    // Count up number of each type of sample (there are always 12 materials).
    int num_samples_per_mat[12] = {0};
    for( int l = 0; l < in.lookups; l++ )
        num_samples_per_mat[ SD.mat_samples[l] ]++;
    // Determine offsets: exclusive prefix sum of the per-material counts,
    // giving the start index of each material's contiguous run.
    int offsets[12] = {0};
    for( int m = 1; m < 12; m++ )
        offsets[m] = offsets[m-1] + num_samples_per_mat[m-1];
    stop = get_time();
    if(mype == 0) printf("Counting samples and offsets took %.3lf seconds\n", stop-start);
    start = stop;
    // Sort each material's run by energy level (keys/values swap roles here).
    int offset = 0;
    for( int m = 0; m < 12; m++ )
        quickSort_parallel_d_i(SD.p_energy_samples + offsets[m],SD.mat_samples + offsets[m], num_samples_per_mat[m], in.nthreads);
    stop = get_time();
    if(mype == 0) printf("Energy Sorts took %.3lf seconds\n", stop-start);
    ////////////////////////////////////////////////////////////////////////////////
    // Perform lookups for each material separately
    ////////////////////////////////////////////////////////////////////////////////
    start = get_time();
    unsigned long long verification = 0;
    // Individual Materials
    offset = 0;
    for( int m = 0; m < 12; m++ )
    {
        #pragma omp parallel for schedule(dynamic,100) reduction(+:verification)
        for( int i = offset; i < offset + num_samples_per_mat[m]; i++)
        {
            // load pre-sampled energy and material for the particle
            double p_energy = SD.p_energy_samples[i];
            int mat = SD.mat_samples[i];
            double macro_xs_vector[5] = {0};
            // Perform macroscopic Cross Section Lookup
            calculate_macro_xs(
                    p_energy,        // Sampled neutron energy (in lethargy)
                    mat,             // Sampled material type index neutron is in
                    in.n_isotopes,   // Total number of isotopes in simulation
                    in.n_gridpoints, // Number of gridpoints per isotope in simulation
                    SD.num_nucs,     // 1-D array with number of nuclides per material
                    SD.concs,        // Flattened 2-D array with concentration of each nuclide in each material
                    SD.unionized_energy_array, // 1-D Unionized energy array
                    SD.index_grid,   // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
                    SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
                    SD.mats,         // Flattened 2-D array with nuclide indices defining composition of each type of material
                    macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
                    in.grid_type,    // Lookup type (nuclide, hash, or unionized)
                    in.hash_bins,    // Number of hash bins used (if using hash lookup type)
                    SD.max_num_nucs  // Maximum number of nuclides present in any material
                    );
            // For verification, and to prevent the compiler from optimizing
            // all work out, we interrogate the returned macro_xs_vector array
            // to find its maximum value index, then increment the verification
            // value by that index. In this implementation, we prevent thread
            // contention by using an OMP reduction on the verification value.
            // For accelerators, a different approach might be required
            // (e.g., atomics, reduction of thread-specific values in large
            // array via CUDA thrust, etc).
            double max = -1.0;
            int max_idx = 0;
            for(int j = 0; j < 5; j++ )
            {
                if( macro_xs_vector[j] > max )
                {
                    max = macro_xs_vector[j];
                    max_idx = j;
                }
            }
            verification += max_idx+1;
        }
        offset += num_samples_per_mat[m];
    }
    stop = get_time();
    if(mype == 0) printf("XS Lookups took %.3lf seconds\n", stop-start);
    return verification;
}
//GridInit.c
// Builds and returns all simulation data arrays: the per-nuclide energy/XS
// grid, the optional acceleration structure (unionized or hash grid), and
// the material composition/concentration tables.  Uses a fixed RNG seed, so
// the generated data is deterministic across runs.  The "_do_not_profile"
// suffix presumably excludes this setup phase from kernel profiling — TODO
// confirm against the build/profiling configuration.
SimulationData grid_init_do_not_profile( Inputs in, int mype )
{
    // Structure to hold all allocated simulation data arrays
    SimulationData SD;
    // Keep track of how much data we're allocating
    size_t nbytes = 0;
    // Set the initial seed value (fixed: data generation is reproducible)
    uint64_t seed = 42;
    ////////////////////////////////////////////////////////////////////
    // Initialize Nuclide Grids
    ////////////////////////////////////////////////////////////////////
    if(mype == 0) printf("Intializing nuclide grids...\n");
    // First, we need to initialize our nuclide grid. This comes in the form
    // of a flattened 2D array that hold all the information we need to define
    // the cross sections for all isotopes in the simulation.
    // The grid is composed of "NuclideGridPoint" structures, which hold the
    // energy level of the grid point and all associated XS data at that level.
    // An array of structures (AOS) is used instead of
    // a structure of arrays, as the grid points themselves are accessed in
    // a random order, but all cross section interaction channels and the
    // energy level are read whenever the gridpoint is accessed, meaning the
    // AOS is more cache efficient.
    // Initialize Nuclide Grid
    SD.length_nuclide_grid = in.n_isotopes * in.n_gridpoints;
    SD.nuclide_grid = (NuclideGridPoint *) malloc( SD.length_nuclide_grid * sizeof(NuclideGridPoint));
    assert(SD.nuclide_grid != NULL);
    nbytes += SD.length_nuclide_grid * sizeof(NuclideGridPoint);
    // Fill every grid point with pseudo-random energy and XS channel values.
    for( int i = 0; i < SD.length_nuclide_grid; i++ )
    {
        SD.nuclide_grid[i].energy        = LCG_random_double(&seed);
        SD.nuclide_grid[i].total_xs      = LCG_random_double(&seed);
        SD.nuclide_grid[i].elastic_xs    = LCG_random_double(&seed);
        SD.nuclide_grid[i].absorbtion_xs = LCG_random_double(&seed);
        SD.nuclide_grid[i].fission_xs    = LCG_random_double(&seed);
        SD.nuclide_grid[i].nu_fission_xs = LCG_random_double(&seed);
    }
    // Sort so that each nuclide has data stored in ascending energy order.
    // (Each isotope's n_gridpoints-long slice is sorted independently.)
    for( int i = 0; i < in.n_isotopes; i++ )
        qsort( &SD.nuclide_grid[i*in.n_gridpoints], in.n_gridpoints, sizeof(NuclideGridPoint), NGP_compare);
    // error debug check
    /*
    for( int i = 0; i < in.n_isotopes; i++ )
    {
        printf("NUCLIDE %d ==============================\n", i);
        for( int j = 0; j < in.n_gridpoints; j++ )
            printf("E%d = %lf\n", j, SD.nuclide_grid[i * in.n_gridpoints + j].energy);
    }
    */
    ////////////////////////////////////////////////////////////////////
    // Initialize Acceleration Structure
    ////////////////////////////////////////////////////////////////////
    // NUCLIDE grid type: no acceleration structure at all; lookups binary
    // search each nuclide grid directly.
    if( in.grid_type == NUCLIDE )
    {
        SD.length_unionized_energy_array = 0;
        SD.length_index_grid = 0;
    }
    if( in.grid_type == UNIONIZED )
    {
        if(mype == 0) printf("Intializing unionized grid...\n");
        // Allocate space to hold the union of all nuclide energy data
        SD.length_unionized_energy_array = in.n_isotopes * in.n_gridpoints;
        SD.unionized_energy_array = (double *) malloc( SD.length_unionized_energy_array * sizeof(double));
        assert(SD.unionized_energy_array != NULL );
        nbytes += SD.length_unionized_energy_array * sizeof(double);
        // Copy energy data over from the nuclide energy grid
        for( int i = 0; i < SD.length_unionized_energy_array; i++ )
            SD.unionized_energy_array[i] = SD.nuclide_grid[i].energy;
        // Sort unionized energy array
        qsort( SD.unionized_energy_array, SD.length_unionized_energy_array, sizeof(double), double_compare);
        // Allocate space to hold the acceleration grid indices
        SD.length_index_grid = SD.length_unionized_energy_array * in.n_isotopes;
        SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int));
        assert(SD.index_grid != NULL);
        nbytes += SD.length_index_grid * sizeof(int);
        // Generates the double indexing grid: for each unionized energy level
        // and each isotope, store the index of the isotope grid point whose
        // energy window contains that level.  Built in one sweep by tracking,
        // per isotope, the current low index and its upper bounding energy.
        int * idx_low = (int *) calloc( in.n_isotopes, sizeof(int));
        assert(idx_low != NULL );
        double * energy_high = (double *) malloc( in.n_isotopes * sizeof(double));
        assert(energy_high != NULL );
        // Initial upper bound for each isotope is its second grid point.
        for( int i = 0; i < in.n_isotopes; i++ )
            energy_high[i] = SD.nuclide_grid[i * in.n_gridpoints + 1].energy;
        for( long e = 0; e < SD.length_unionized_energy_array; e++ )
        {
            double unionized_energy = SD.unionized_energy_array[e];
            for( long i = 0; i < in.n_isotopes; i++ )
            {
                if( unionized_energy < energy_high[i] )
                    SD.index_grid[e * in.n_isotopes + i] = idx_low[i];
                else if( idx_low[i] == in.n_gridpoints - 2 )
                    // Already at the last window for this isotope; clamp.
                    SD.index_grid[e * in.n_isotopes + i] = idx_low[i];
                else
                {
                    // Advance this isotope's window to cover the new energy.
                    idx_low[i]++;
                    SD.index_grid[e * in.n_isotopes + i] = idx_low[i];
                    energy_high[i] = SD.nuclide_grid[i * in.n_gridpoints + idx_low[i] + 1].energy;
                }
            }
        }
        free(idx_low);
        free(energy_high);
    }
    if( in.grid_type == HASH )
    {
        if(mype == 0) printf("Intializing hash grid...\n");
        SD.length_unionized_energy_array = 0;
        SD.length_index_grid = in.hash_bins * in.n_isotopes;
        SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int));
        assert(SD.index_grid != NULL);
        nbytes += SD.length_index_grid * sizeof(int);
        // Bin width: energies are uniform in [0,1), so bin e covers e*du.
        double du = 1.0 / in.hash_bins;
        // For each energy level in the hash table
        #pragma omp parallel for
        for( long e = 0; e < in.hash_bins; e++ )
        {
            double energy = e * du;
            // We need to determine the bounding energy levels for all isotopes
            for( long i = 0; i < in.n_isotopes; i++ )
            {
                SD.index_grid[e * in.n_isotopes + i] = grid_search_nuclide( in.n_gridpoints, energy, SD.nuclide_grid + i * in.n_gridpoints, 0, in.n_gridpoints-1);
            }
        }
    }
    ////////////////////////////////////////////////////////////////////
    // Initialize Materials and Concentrations
    ////////////////////////////////////////////////////////////////////
    if(mype == 0) printf("Intializing material data...\n");
    // Set the number of nuclides in each material
    SD.num_nucs = load_num_nucs(in.n_isotopes);
    SD.length_num_nucs = 12; // There are always 12 materials in XSBench
    // Intialize the flattened 2D grid of material data. The grid holds
    // a list of nuclide indices for each of the 12 material types. The
    // grid is allocated as a full square grid, even though not all
    // materials have the same number of nuclides.
    SD.mats = load_mats(SD.num_nucs, in.n_isotopes, &SD.max_num_nucs);
    SD.length_mats = SD.length_num_nucs * SD.max_num_nucs;
    // Intialize the flattened 2D grid of nuclide concentration data. The grid holds
    // a list of nuclide concentrations for each of the 12 material types. The
    // grid is allocated as a full square grid, even though not all
    // materials have the same number of nuclides.
    SD.concs = load_concs(SD.num_nucs, SD.max_num_nucs);
    SD.length_concs = SD.length_mats;
    if(mype == 0) printf("Intialization complete. Allocated %.0lf MB of data.\n", nbytes/1024.0/1024.0 );
    return SD;
}
// GB_unaryop__abs_bool_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_bool
// op(A') function: GB_tran__abs_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// Type of the A matrix entries
#define GB_ATYPE \
    bool

// Type of the C matrix entries
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// Access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator (abs on bool is the identity)
#define GB_OP(z, x) \
    z = x ;

// casting (bool to bool: a no-op cast)
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij)) — the full load/cast/apply/store sequence used by
// both the apply loop and the transpose template
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the (bool abs) unary operator to a dense array
// of anz entries.  Entries are mutually independent, so the loop is split
// statically across the requested number of threads.
GrB_Info GB_unop__abs_bool_bool
(
    bool *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // Operator compiled out; caller must fall back to the generic kernel.
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // Cx [k] = op (cast (Ax [k]))
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply the unary operator.
// Rowcounts/Iter/A_slice carry the phase-1 slicing of A across naslice
// parallel slices; this function runs only the numerical phase 2, whose
// actual loop lives in the included template.
GrB_Info GB_tran__abs_bool_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // Operator compiled out; caller must fall back to the generic kernel.
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
// GB_export.c
//------------------------------------------------------------------------------
// GB_export: export a matrix or vector
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// No conversion is done, except to convert to non-iso if requested. The
// matrix is exported in its current sparsity structure and by-row/by-col
// format.
#include "GB_export.h"
// Error-path cleanup: frees the replacement Ap/Ah arrays, which are only
// allocated when unpacking.
#define GB_FREE_ALL                     \
{                                       \
    GB_FREE (&Ap_new, Ap_new_size) ;    \
    GB_FREE (&Ah_new, Ah_new_size) ;    \
}

// Moves A's internal arrays out to the caller in A's current format.  If
// unpacking, A's header survives as an empty (hypersparse or sparse) matrix;
// otherwise the whole matrix is freed.  Returns GrB_SUCCESS, or an error if
// required output pointers are NULL or memory runs out.
GrB_Info GB_export      // export/unpack a matrix in any format
(
    bool unpacking,     // unpack if true, export and free if false
    GrB_Matrix *A,      // handle of matrix to export and free, or unpack
    GrB_Type *type,     // type of matrix to export
    GrB_Index *vlen,    // vector length
    GrB_Index *vdim,    // vector dimension
    bool is_sparse_vector,      // true if A is a sparse GrB_Vector
    // the 5 arrays:
    GrB_Index **Ap,     // pointers
    GrB_Index *Ap_size, // size of Ap in bytes
    GrB_Index **Ah,     // vector indices
    GrB_Index *Ah_size, // size of Ah in bytes
    int8_t **Ab,        // bitmap
    GrB_Index *Ab_size, // size of Ab in bytes
    GrB_Index **Ai,     // indices
    GrB_Index *Ai_size, // size of Ai in bytes
    void **Ax,          // values
    GrB_Index *Ax_size, // size of Ax in bytes
    // additional information for specific formats:
    GrB_Index *nvals,   // # of entries for bitmap format.
    bool *jumbled,      // if true, sparse/hypersparse may be jumbled.
    GrB_Index *nvec,    // size of Ah for hypersparse format.
    // information for all formats:
    int *sparsity,      // hypersparse, sparse, bitmap, or full
    bool *is_csc,       // if true then matrix is by-column, else by-row
    bool *iso,          // if true then A is iso and only one entry is returned
                        // in Ax, regardless of nvals(A).
    GB_Context Context
)
{
    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------
    GrB_Info info ;
    int64_t *Ap_new = NULL ; size_t Ap_new_size = 0 ;
    int64_t *Ah_new = NULL ; size_t Ah_new_size = 0 ;
    ASSERT (A != NULL) ;
    GB_RETURN_IF_NULL_OR_FAULTY (*A) ;
    ASSERT_MATRIX_OK (*A, "A to export", GB0) ;
    ASSERT (!GB_ZOMBIES (*A)) ;
    ASSERT (GB_JUMBLED_OK (*A)) ;
    ASSERT (!GB_PENDING (*A)) ;
    GB_RETURN_IF_NULL (type) ;
    GB_RETURN_IF_NULL (vlen) ;
    GB_RETURN_IF_NULL (vdim) ;
    GB_RETURN_IF_NULL (Ax) ;
    GB_RETURN_IF_NULL (Ax_size) ;
    int s = GB_sparsity (*A) ;
    // Validate the outputs each format requires.  The fall-throughs are
    // intentional: hypersparse also needs everything sparse needs, and
    // bitmap also needs everything full needs.
    switch (s)
    {
        case GxB_HYPERSPARSE : 
            GB_RETURN_IF_NULL (nvec) ;
            GB_RETURN_IF_NULL (Ah) ; GB_RETURN_IF_NULL (Ah_size) ;
            // fall-through to the sparse case

        case GxB_SPARSE : 
            if (is_sparse_vector)
            {
                // a sparse CSC vector returns nvals instead of Ap
                GB_RETURN_IF_NULL (nvals) ;
            }
            else
            {
                GB_RETURN_IF_NULL (Ap) ; GB_RETURN_IF_NULL (Ap_size) ;
            }
            GB_RETURN_IF_NULL (Ai) ; GB_RETURN_IF_NULL (Ai_size) ;
            break ;

        case GxB_BITMAP : 
            GB_RETURN_IF_NULL (nvals) ;
            GB_RETURN_IF_NULL (Ab) ; GB_RETURN_IF_NULL (Ab_size) ;
            // fall-through to the full case

        case GxB_FULL : 
            break ;

        default: ;
    }
    //--------------------------------------------------------------------------
    // allocate new space for Ap and Ah if unpacking
    //--------------------------------------------------------------------------
    // The replacement arrays are allocated up-front so that, once the
    // matrix contents have been handed to the caller, no failure is possible.
    int64_t avdim = (*A)->vdim ;
    int64_t plen_new, nvec_new ;
    if (unpacking)
    {
        plen_new = (avdim == 0) ? 0 : 1 ;
        nvec_new = (avdim == 1) ? 1 : 0 ;
        Ap_new = GB_CALLOC (plen_new+1, int64_t, &(Ap_new_size)) ;
        if (avdim > 1)
        {
            // A is sparse if avdim <= 1, hypersparse if avdim > 1
            Ah_new = GB_CALLOC (1, int64_t, &(Ah_new_size)) ;
        }
        if (Ap_new == NULL || (avdim > 1 && Ah_new == NULL))
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
    }
    //--------------------------------------------------------------------------
    // ensure A is non-iso if requested, or export A as-is
    //--------------------------------------------------------------------------
    if (iso == NULL)
    {
        // caller cannot receive an iso matrix: expand to non-iso first
        // ensure A is non-iso
        // set A->iso = false OK
        if ((*A)->iso)
        { 
            GBURBLE ("(iso to non-iso export) ") ;
        }
        GB_OK (GB_convert_any_to_non_iso (*A, true, Context)) ;
        ASSERT (!((*A)->iso)) ;
    }
    else
    {
        // do not convert the matrix; export A as-is, either iso or non-iso
        (*iso) = (*A)->iso ;
        if (*iso)
        { 
            GBURBLE ("(iso export) ") ;
        }
    }
    //--------------------------------------------------------------------------
    // export the matrix
    //--------------------------------------------------------------------------
    // From here on, ownership of each array moves to the caller: the array
    // is removed from the debug memtable and A's pointer is set to NULL.
    (*type) = (*A)->type ;
    (*vlen) = (*A)->vlen ;
    (*vdim) = avdim ;
    // export A->x
    #ifdef GB_MEMDUMP
    printf ("export A->x from memtable: %p\n", (*A)->x) ;
    #endif
    GB_Global_memtable_remove ((*A)->x) ;
    (*Ax) = (*A)->x ; (*A)->x = NULL ;
    (*Ax_size) = (*A)->x_size ;
    // Fall-throughs below mirror the validation switch: hypersparse exports
    // Ah then continues with the sparse arrays; bitmap exports Ab then
    // continues with the (empty) full case.
    switch (s)
    {
        case GxB_HYPERSPARSE : 
            (*nvec) = (*A)->nvec ;
            // export A->h
            #ifdef GB_MEMDUMP
            printf ("export A->h from memtable: %p\n", (*A)->h) ;
            #endif
            GB_Global_memtable_remove ((*A)->h) ;
            (*Ah) = (GrB_Index *) ((*A)->h) ; (*A)->h = NULL ;
            (*Ah_size) = (*A)->h_size ;
            // fall-through to the sparse case

        case GxB_SPARSE : 
            if (jumbled != NULL)
            { 
                (*jumbled) = (*A)->jumbled ;
            }
            // export A->p, unless A is a sparse vector in CSC format
            if (is_sparse_vector)
            { 
                (*nvals) = (*A)->p [1] ;
            }
            else
            { 
                #ifdef GB_MEMDUMP
                printf ("export A->p from memtable: %p\n", (*A)->p) ;
                #endif
                GB_Global_memtable_remove ((*A)->p) ;
                (*Ap) = (GrB_Index *) ((*A)->p) ; (*A)->p = NULL ;
                (*Ap_size) = (*A)->p_size ;
            }
            // export A->i
            #ifdef GB_MEMDUMP
            printf ("export A->i from memtable: %p\n", (*A)->i) ;
            #endif
            GB_Global_memtable_remove ((*A)->i) ;
            (*Ai) = (GrB_Index *) ((*A)->i) ; (*A)->i = NULL ;
            (*Ai_size) = (*A)->i_size ;
            break ;

        case GxB_BITMAP : 
            (*nvals) = (*A)->nvals ;
            // export A->b
            #ifdef GB_MEMDUMP
            printf ("export A->b from memtable: %p\n", (*A)->b) ;
            #endif
            GB_Global_memtable_remove ((*A)->b) ;
            (*Ab) = (*A)->b ; (*A)->b = NULL ;
            (*Ab_size) = (*A)->b_size ;
            // fall-through to the full case (no additional arrays)

        case GxB_FULL : 
        default: ;
    }
    if (sparsity != NULL)
    { 
        (*sparsity) = s ;
    }
    if (is_csc != NULL)
    { 
        (*is_csc) = (*A)->is_csc ;
    }
    //--------------------------------------------------------------------------
    // free or clear the GrB_Matrix
    //--------------------------------------------------------------------------
    if (unpacking)
    {
        // unpack: clear the matrix, leaving it hypersparse (or sparse if
        // it is a vector (vdim of 1) or has vdim of zero)
        GB_phbix_free (*A) ;
        (*A)->plen = plen_new ;
        (*A)->nvec = nvec_new ;
        (*A)->p = Ap_new ; (*A)->p_size = Ap_new_size ;
        (*A)->h = Ah_new ; (*A)->h_size = Ah_new_size ;
        (*A)->magic = GB_MAGIC ;
        ASSERT_MATRIX_OK (*A, "A unpacked", GB0) ;
    }
    else
    {
        // export: free the header of A, and A->p if A is a sparse GrB_Vector
        GB_Matrix_free (A) ;
        ASSERT ((*A) == NULL) ;
    }
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
// GB_binop__bor_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__bor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__bor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint64)
// A*D function (colscale): GB (_AxD__bor_uint64)
// D*A function (rowscale): GB (_DxB__bor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__bor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint64)
// C=scalar+B GB (_bind1st__bor_uint64)
// C=scalar+B' GB (_bind1st_tran__bor_uint64)
// C=A+scalar GB (_bind2nd__bor_uint64)
// C=A'+scalar GB (_bind2nd_tran__bor_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij) | (bij)
// Type of the A matrix entries
#define GB_ATYPE \
    uint64_t

// Type of the B matrix entries
#define GB_BTYPE \
    uint64_t

// Type of the C matrix entries
#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// Access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// binary operator: bitwise OR
#define GB_BINOP(z,x,y,i,j) \
    z = (x) | (y) ;

// true if the binop must be flipped (OR is commutative, so never)
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BOR || GxB_NO_UINT64 || GxB_NO_BOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A|B where C, A, and B are all dense: no pattern analysis needed,
// the included template iterates over every entry directly.
GrB_Info GB (_Cdense_ewise3_noaccum__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C |= B: accumulate a sparse matrix B into a dense matrix C, with B
// pre-sliced into B_ntasks tasks over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C |= b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.
GrB_Info GB (_Cdense_accumb__bor_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable — the inner block above always returns first.  This
    // is a harmless artifact of the code generator ("do not edit" file).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with A
// pre-sliced into A_ntasks tasks over A_nthreads threads.
GrB_Info GB (_AxD__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as A; only its values are computed here
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{ 
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as B; only its values are computed here
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the BOR operator, over a
// pre-computed task list (TaskList / C_ntasks / C_nthreads).
GrB_Info GB (_AaddB__bor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // Workspace handles for slicing M, A, and B; used by the template
    // (see GB_add_template.c) and released by GB_FREE_WORK below.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the BOR operator; the actual
// loops live in the included meta-template.
GrB_Info GB (_AemultB_01__bor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel, method 02, for BOR on uint64.  GB_BINOP_FLIP selects at
// compile time whether a runtime flipxy flag must be honored (non-commutative
// operators with no pre-flipped variant); BOR is commutative, so normally the
// single GB_FLIPPED=0 instantiation is compiled.
GrB_Info GB (_AemultB_02__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel, method 03, for BOR on uint64; the mask M drives the
// iteration, using its precomputed ek-slicing.
GrB_Info GB (_AemultB_03__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated bitmap-output element-wise multiply kernel for BOR on uint64.
GrB_Info GB (_AemultB_bitmap__bor_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply the binary operator with the scalar bound as the
// first argument (bind1st); Cx [k] = x | Bx [k] for every entry present in B
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bor_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // apply only to entries present in B's bitmap
        if (GBB (Bb, k))
        {
            uint64_t bval = GBX (Bx, k, false) ;
            Cx [k] = (x) | (bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply the binary operator with the scalar bound as the
// second argument (bind2nd); Cx [k] = Ax [k] | y for every entry present in A
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bor_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply only to entries present in A's bitmap
        if (GBB (Ab, k))
        {
            uint64_t aval = GBX (Ax, k, false) ;
            Cx [k] = (aval) | (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x) | (aij) ; \
}
// Generated transpose-and-apply kernel with the scalar bound first; the
// transpose machinery (Workspaces, A_slice) comes from the shared template.
GrB_Info GB (_bind1st_tran__bor_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent generated kernels in this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij) | (y) ; \
}
// Generated transpose-and-apply kernel with the scalar bound second.
GrB_Info GB (_bind2nd_tran__bor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
functions.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "functions.h"
//compute a*b mod p safely
//Russian-peasant multiplication: accumulate a*2^k (mod p) for each set bit
//of b, so no intermediate product ever exceeds the unsigned int range as
//long as p < 2^31.
unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int addend = a;
  unsigned int result = 0;
  for (; b > 0; b /= 2) {
    if (b % 2 == 1) result = (result + addend) % p;
    addend = (2 * addend) % p;
  }
  return result;
}
//compute a^b mod p safely
//square-and-multiply: multiply the result by the current square of a for
//each set bit of b; all arithmetic goes through modprod to avoid overflow.
unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int base = a;
  unsigned int result = 1;
  for (; b > 0; b /= 2) {
    if (b % 2 == 1) result = modprod(result, base, p);
    base = modprod(base, base, p);
  }
  return result;
}
//returns either 0 or 1 randomly (low bit of rand())
unsigned int randomBit() {
  return (unsigned int)(rand() & 1);
}
//returns a random integer which is between 2^{n-1} and 2^{n}
//(starts from 1 and appends n-1 random bits, so the top bit is always set)
unsigned int randXbitInt(unsigned int n) {
  unsigned int value = 1;
  unsigned int i;
  for (i = 0; i < n - 1; i++) {
    value = value * 2 + randomBit();
  }
  return value;
}
//tests for primality and return 1 if N is probably prime and 0 if N is composite
//Strategy: reject N < 2 and even N, check directly against the first 168
//primes, then run a Miller-Rabin test using each of those primes as a
//witness base.
//
//Fixes vs. the previous version:
// * N < 2 used to hang: for N==1, d = N-1 = 0 and the "while (d%2 == 0)"
//   loop never terminated.  Now rejected up front.
// * The Miller-Rabin witness loop ran only r-2 squarings (i from 1 to r-2);
//   the algorithm requires r-1 squarings of x = k^d to reach k^((N-1)/2),
//   so some genuine primes were misreported as composite.
unsigned int isProbablyPrime(unsigned int N) {
  if (N < 2) return 0;  //0 and 1 are not prime (also avoids d=0 loop below)
  if (N%2==0) return 0; //not interested in even numbers (including 2)

  unsigned int NsmallPrimes = 168;
  unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,
                                      37, 41, 43, 47, 53, 59, 61, 67, 71, 73,
                                      79, 83, 89, 97, 101, 103, 107, 109, 113,
                                      127, 131, 137, 139, 149, 151, 157, 163,
                                      167, 173, 179, 181, 191, 193, 197, 199,
                                      211, 223, 227, 229, 233, 239, 241, 251,
                                      257, 263, 269, 271, 277, 281, 283, 293,
                                      307, 311, 313, 317, 331, 337, 347, 349,
                                      353, 359, 367, 373, 379, 383, 389, 397,
                                      401, 409, 419, 421, 431, 433, 439, 443,
                                      449, 457, 461, 463, 467, 479, 487, 491,
                                      499, 503, 509, 521, 523, 541, 547, 557,
                                      563, 569, 571, 577, 587, 593, 599, 601,
                                      607, 613, 617, 619, 631, 641, 643, 647,
                                      653, 659, 661, 673, 677, 683, 691, 701,
                                      709, 719, 727, 733, 739, 743, 751, 757,
                                      761, 769, 773, 787, 797, 809, 811, 821,
                                      823, 827, 829, 839, 853, 857, 859, 863,
                                      877, 881, 883, 887, 907, 911, 919, 929,
                                      937, 941, 947, 953, 967, 971, 977, 983,
                                      991, 997};

  //before using a probablistic primality check, check directly using the small primes list
  //(index 0 is the prime 2, already handled by the even-number test above)
  for (unsigned int n=1;n<NsmallPrimes;n++) {
    if (N==smallPrimeList[n]) return 1; //true
    if (N%smallPrimeList[n]==0) return 0; //false
  }

  //if we're testing a large number switch to Miller-Rabin primality test:
  //write N-1 = d * 2^r with d odd
  unsigned int r = 0;
  unsigned int d = N-1;
  while (d%2 == 0) {
    d /= 2;
    r += 1;
  }

  for (unsigned int n=0;n<NsmallPrimes;n++) {
    unsigned int k = smallPrimeList[n];
    unsigned int x = modExp(k,d,N);
    if ((x==1) || (x==N-1)) continue;

    //square x up to r-1 times, looking for x == N-1
    //(fix: loop bound was r-1, giving only r-2 squarings)
    for (unsigned int i=1;i<r;i++) {
      x = modprod(x,x,N);
      if (x == 1) return 0;   //nontrivial square root of 1 => composite
      if (x == N-1) break;
    }
    // see whether we left the loop because x==N-1
    if (x == N-1) continue;

    return 0; //false
  }
  return 1; //true
}
//Finds a generator of Z_p using the assumption that p=2*q+1
//Any g with g != 0, g^q != 1 and g^2 != 1 must have order p-1.
unsigned int findGenerator(unsigned int p) {
  unsigned int q = (p-1)/2;
  unsigned int g;
  for (;;) {
    //make a random number 1<= g < p
    g = randXbitInt(32)%p; //could also have passed n to findGenerator
    if (g != 0 && modExp(g,q,p) != 1 && modExp(g,2,p) != 1) break;
  }
  return g;
}
//Generate a full ElGamal key set: a random n-bit safe prime p = 2*q+1,
//a generator g of Z_p, a secret key x and the public value h = g^x mod p.
void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g,
                  unsigned int *h, unsigned int *x) {

  /* Use isProbablyPrime and randomXbitInt to find a new random n-bit prime number
     which satisfies p=2*q+1 where q is also prime */
  unsigned int q;
  for (;;) {
    *p = randXbitInt(n);
    q = (*p - 1)/2;
    if (isProbablyPrime(*p) && isProbablyPrime(q)) break;
  }

  /* Use the fact that p=2*q+1 to quickly find a generator */
  *g = findGenerator(*p);

  //pick a secret key, x
  *x = randXbitInt(n)%(*p);

  //compute h
  *h = modExp(*g,*x,*p);

  printf("ElGamal Setup successful.\n");
  printf("p = %u. \n", *p);
  printf("g = %u is a generator of Z_%u \n", *g, *p);
  printf("Secret key: x = %u \n", *x);
  printf("h = g^x = %u\n", *h);
  printf("\n");
}
//Encrypt Nints messages in place: m[i] becomes the ciphertext and a[i]
//receives the per-message ephemeral key g^y.  (p,g,h) is the public key.
void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int g, unsigned int h) {

  /* Q2.1 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for(unsigned int i=0; i<Nints;i++) {
    //pick y in Z_p randomly
    //NOTE(review): randXbitInt() calls rand(), which is not required to be
    //thread-safe, so calling it from inside this omp parallel loop is a data
    //race; consider rand_r()/per-thread RNG state -- confirm intended tradeoff.
    unsigned int y;
    do {
      y = randXbitInt(32)%p;
    } while (y==0); //dont allow y=0

    //compute a = g^y
    a[i] = modExp(g,y,p);

    //compute s = h^y  (the shared secret for this message)
    unsigned int s = modExp(h,y,p);

    //encrypt m by multiplying with s
    m[i] = modprod(m[i],s,p);
  }
}
//Decrypt Nints ciphertexts in place using the secret key x and the
//per-message ephemeral keys a[i].
void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int x) {

  /* Q2.1 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for(unsigned int i=0; i<Nints;i++) {
    unsigned int shared  = modExp(a[i],x,p);       //s = a^x
    unsigned int sInv    = modExp(shared,p-2,p);   //s^{-1} = s^{p-2} (Fermat)
    m[i] = modprod(m[i],sInv,p);                   //recover the plaintext
  }
}
//Pad the end of string with spaces so its length is divisible by charsPerInt.
// Assume there is enough allocated storage for the padded string.
//
// Fix: the previous version overwrote the '\0' terminator with ' ' but never
// wrote a new terminator inside the loop, so the next strlen() call read past
// the logical end of the string (undefined behavior unless the trailing bytes
// happened to be zero).  It also rescanned the string with strlen() on every
// iteration.  This version tracks the length once and re-terminates at the end.
void padString(unsigned char* string, unsigned int charsPerInt)
{
  /* Q1.2 Complete this function */
  size_t len = strlen((char*)string);
  while ((len % charsPerInt) != 0)
  {
    string[len++] = ' ';  //append one pad character
  }
  string[len] = '\0';     //re-terminate exactly once
}
//Pack Nchars characters into Nints unsigned ints, cpi = Nchars/Nints
//characters per int, little-endian (string[i] is the low byte of its word).
//
// Fix: the previous version XORed shifted bytes into Z[i/cpi] without ever
// zeroing it, so the result was only correct if the caller had pre-zeroed Z.
// Each word is now built in a local accumulator and assigned, which also
// avoids repeated writes to shared memory in the parallel loop.
void convertStringToZ(unsigned char *string, unsigned int Nchars,
                      unsigned int *Z, unsigned int Nints)
{
  /* Q1.3 Complete this function */
  unsigned int cpi = Nchars/Nints;   //chars per int

  /* Q2.2 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for(unsigned int i = 0; i < Nchars; i = i + cpi)
  {
    unsigned int word = 0;           //local accumulator (no reliance on Z)
    for (unsigned int j = 0; j < cpi; j++)
    {
      //cast char j to an int and shift it into byte position j
      word |= ((unsigned int)string[i+j]) << (j*8);
    }
    Z[i/cpi] = word;
  }
}
//Unpack Nints unsigned ints back into Nchars characters, cpi = Nchars/Nints
//characters per int, little-endian (low byte of each word comes first).
void convertZToString(unsigned int *Z, unsigned int Nints,
                      unsigned char *string, unsigned int Nchars)
{
  /* Q1.4 Complete this function */
  unsigned int cpi = Nchars/Nints;

  /* Q2.2 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for(unsigned int i = 0; i < Nchars; i = i + cpi)
  {
    for (unsigned int j = 0; j < cpi; j++)
    {
      //shift byte j down to the low position, mask it off, and cast back
      string[i+j] = (unsigned char)((Z[i/cpi] >> (j*8)) & 0xFFu);
    }
  }
}
|
compute.c | #include <stdlib.h>
#include <omp.h>
#include "compute.h"
/* Parallel branch-and-bound TSP solve.
 *
 * Splits the global search frontier round-robin across `ncores` local
 * searches, expands each one depth-first in its own OpenMP thread, then
 * reduces the per-core optima back into global_search->optimum.
 * Returns global_search->optimum (owned by global_search, not the caller).
 *
 * NOTE(review): malloc() results are not checked for NULL, and
 * tsp_solution_cpy(global_optimum) is called even if every local optimum is
 * NULL -- presumably tsp_solution_cpy tolerates NULL; verify against its
 * implementation. */
tsp_solution_t* compute(tsp_search_t* global_search, int ncores)
{
    /**
     * Expand the global_search list if the are too few nodes.
     * This will only happen for small values of N.
     * I don't think this will become the bottleneck.
     */
    while (global_search->list->length > 0 && global_search->list->length < ncores)
        tsp_search_iterate(global_search, TSP_SEARCH_BREADTH_FIRST);

    /* one local search state per core, seeded with a copy of the current optimum */
    tsp_search_t** all_local_searches = (tsp_search_t**) malloc(ncores * sizeof(tsp_search_t*));
    for (int core = 0; core < ncores; core++)
    {
        all_local_searches[core] = (tsp_search_t*) malloc(sizeof(tsp_search_t));
        all_local_searches[core]->problem = global_search->problem;
        all_local_searches[core]->optimum = tsp_solution_cpy(global_search->optimum);
        all_local_searches[core]->list = list_new(NULL);
    }

    /* deal the frontier nodes out round-robin */
    int core = 0;
    while (global_search->list->length)
    {
        list_enqueue(all_local_searches[core]->list, list_dequeue(global_search->list));
        core = (core + 1) % ncores;
    }

    /**
     * Expand search tree nodes depth-first so as to try and save some RAM.
     * https://www.quora.com/Why-is-DFS-usually-more-space-efficient-than-BFS
     */
    #pragma omp parallel num_threads(ncores)
    {
        int my_core = omp_get_thread_num();
        tsp_search_t* my_local_search = all_local_searches[my_core];
        while (my_local_search->list->length)
            tsp_search_iterate(my_local_search, TSP_SEARCH_DEPTH_FIRST);
    }

    /* reduce: pick the cheapest non-NULL local optimum */
    tsp_solution_t* global_optimum = all_local_searches[0]->optimum;
    for (int core = 1; core < ncores; core++)
        if (!global_optimum)
            global_optimum = all_local_searches[core]->optimum;
        else if (all_local_searches[core]->optimum)
            if (all_local_searches[core]->optimum->cost < global_optimum->cost)
                global_optimum = all_local_searches[core]->optimum;

    /* copy the winner into global_search BEFORE freeing the local copies below */
    if (global_search->optimum)
        tsp_solution_del(global_search->optimum);
    global_search->optimum = tsp_solution_cpy(global_optimum);

    /* NOTE(review): this string is built but never printed or returned --
     * looks like leftover debug code; confirm before removing. */
    char my_local_optimum_string[TSP_SOLUTION_STRING_MAX];
    tsp_solution_to_string(global_search->optimum, my_local_optimum_string);

    /* tear down all per-core state */
    for (int core = 0; core < ncores; core++)
    {
        if (all_local_searches[core]->optimum)
            tsp_solution_del(all_local_searches[core]->optimum);
        list_del(all_local_searches[core]->list);
        free(all_local_searches[core]);
    }
    free(all_local_searches);

    return global_search->optimum;
}
|
schedule.c | #include <stdio.h>
#include <omp.h>
#include <math.h>
//returns 1 if n is prime, 0 otherwise (trial division up to sqrt(n))
//
//Fixes vs. the previous version:
// * n < 2 (including 0, 1 and negatives) used to be reported as prime.
// * sqrt(n) was recomputed on every loop iteration; the integer comparison
//   i*i <= n (widened to avoid overflow) is exact and cheaper.
int prime(int n)
{
    if (n < 2)
    {
        return 0;
    }
    for (int i = 2; (long long)i * i <= n; i++)
    {
        if (n%i == 0)
        {
            return 0;
        }
    }
    return 1;
}
//Benchmark the same prime-counting loop under the four OpenMP schedule kinds.
//
//Fixes vs. the previous version:
// * "#pragma omp schedule(...)" is not a valid OpenMP directive -- it was
//   silently ignored, so under "#pragma omp parallel" every thread executed
//   the ENTIRE loop and the benchmark measured nothing meaningful.  The
//   correct form is "#pragma omp parallel for schedule(...)".
// * "schedule(guide)" is an invalid kind; the OpenMP keyword is "guided".
// * The loop variable was mutated inside the loop (i+=i), which is
//   non-conforming for an OpenMP worksharing loop; the work is accumulated
//   into a reduction counter instead.
int main()
{
    double t1,t2,time;
    int i=0;
    int count = 0;   //number of primes found (reduction target)
    omp_set_num_threads(5);

    t1=omp_get_wtime();
    count = 0;
    #pragma omp parallel for schedule(static) reduction(+:count)
    for (i=2;i<10000;i++)
    {
        if (prime(i))
        {
            count++;
        }
    }
    t2=omp_get_wtime();
    time = (t2-t1)*1000;
    printf(" \nstatic schedule time = %10.5f milleseconds\n", time);
    // ###################################################################
    t1=omp_get_wtime();
    count = 0;
    #pragma omp parallel for schedule(dynamic) reduction(+:count)
    for (i=2;i<10000;i++)
    {
        if (prime(i))
        {
            count++;
        }
    }
    t2=omp_get_wtime();
    time = (t2-t1)*1000;
    printf(" \ndynamic schedule time = %10.5f milleseconds\n", time);
    // ##################################################################
    t1=omp_get_wtime();
    count = 0;
    #pragma omp parallel for schedule(guided) reduction(+:count)
    for (i=2;i<10000;i++)
    {
        if (prime(i))
        {
            count++;
        }
    }
    t2=omp_get_wtime();
    time = (t2-t1)*1000;
    printf(" \nguide schedule time = %10.5f milleseconds\n", time);
    // ##################################################################
    t1=omp_get_wtime();
    count = 0;
    #pragma omp parallel for schedule(runtime) reduction(+:count)
    for (i=2;i<10000;i++)
    {
        if (prime(i))
        {
            count++;
        }
    }
    t2=omp_get_wtime();
    time = (t2-t1)*1000;
    printf(" \nruntime schedule time = %10.5f milleseconds\n", time);
    return 0;
}
struct_matrix.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.43 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Member functions for hypre_StructMatrix class.
*
*****************************************************************************/
#include "_hypre_struct_mv.h"
/*--------------------------------------------------------------------------
* hypre_StructMatrixExtractPointerByIndex
* Returns pointer to data for stencil entry coresponding to
* `index' in `matrix'. If the index does not exist in the matrix's
* stencil, the NULL pointer is returned.
*--------------------------------------------------------------------------*/
double *
hypre_StructMatrixExtractPointerByIndex( hypre_StructMatrix *matrix,
                                         HYPRE_Int           b,
                                         hypre_Index         index )
{
   /* Look `index' up in the matrix's stencil; if absent, there is no data
      pointer to return. */
   hypre_StructStencil *stencil = hypre_StructMatrixStencil(matrix);
   HYPRE_Int            rank    = hypre_StructStencilElementRank(stencil, index);

   if (rank < 0)
   {
      return NULL; /* error - invalid index */
   }

   return hypre_StructMatrixBoxData(matrix, b, rank);
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixCreate
*--------------------------------------------------------------------------*/
hypre_StructMatrix *
hypre_StructMatrixCreate( MPI_Comm             comm,
                          hypre_StructGrid    *grid,
                          hypre_StructStencil *user_stencil )
{
   hypre_StructMatrix *matrix = hypre_CTAlloc(hypre_StructMatrix, 1);
   HYPRE_Int           d;

   hypre_StructMatrixComm(matrix) = comm;
   hypre_StructGridRef(grid, &hypre_StructMatrixGrid(matrix));
   hypre_StructMatrixUserStencil(matrix) = hypre_StructStencilRef(user_stencil);
   hypre_StructMatrixDataAlloced(matrix) = 1;
   hypre_StructMatrixRefCount(matrix)    = 1;

   /* defaults: non-symmetric, variable coefficients, ghost sizes from grid */
   hypre_StructMatrixSymmetric(matrix)           = 0;
   hypre_StructMatrixConstantCoefficient(matrix) = 0;
   for (d = 0; d < 6; d++)
   {
      hypre_StructMatrixNumGhost(matrix)[d] = hypre_StructGridNumGhost(grid)[d];
   }

   return matrix;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixRef
*--------------------------------------------------------------------------*/
/* Take an additional reference on `matrix' (released by Destroy). */
hypre_StructMatrix *
hypre_StructMatrixRef( hypre_StructMatrix *matrix )
{
   hypre_StructMatrixRefCount(matrix) += 1;
   return matrix;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixDestroy
*--------------------------------------------------------------------------*/
/* Drop one reference on `matrix'; all owned storage is freed only when the
 * last reference is released.  NULL matrix is a no-op. */
HYPRE_Int
hypre_StructMatrixDestroy( hypre_StructMatrix *matrix )
{
   HYPRE_Int  i;

   if (matrix)
   {
      hypre_StructMatrixRefCount(matrix) --;
      if (hypre_StructMatrixRefCount(matrix) == 0)
      {
         /* the value array is freed only if this matrix allocated it
            (see hypre_StructMatrixInitializeData) */
         if (hypre_StructMatrixDataAlloced(matrix))
         {
            hypre_SharedTFree(hypre_StructMatrixData(matrix));
         }
         hypre_CommPkgDestroy(hypre_StructMatrixCommPkg(matrix));

         /* per-box index tables, then the table of tables */
         hypre_ForBoxI(i, hypre_StructMatrixDataSpace(matrix))
            hypre_TFree(hypre_StructMatrixDataIndices(matrix)[i]);
         hypre_TFree(hypre_StructMatrixDataIndices(matrix));
         hypre_BoxArrayDestroy(hypre_StructMatrixDataSpace(matrix));

         hypre_TFree(hypre_StructMatrixSymmElements(matrix));
         hypre_StructStencilDestroy(hypre_StructMatrixUserStencil(matrix));
         hypre_StructStencilDestroy(hypre_StructMatrixStencil(matrix));
         hypre_StructGridDestroy(hypre_StructMatrixGrid(matrix));

         hypre_TFree(matrix);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixInitializeShell
*--------------------------------------------------------------------------*/
/* Set up everything about `matrix' except the value array itself: the
 * (possibly symmetrized) stencil, the ghost-layer sizes, the data space,
 * the per-box data_indices tables, and the total data size.  Idempotent:
 * each piece is computed only if still NULL. */
HYPRE_Int
hypre_StructMatrixInitializeShell( hypre_StructMatrix *matrix )
{
   hypre_StructGrid     *grid;
   HYPRE_Int             ndim;
   hypre_StructStencil  *user_stencil;
   hypre_StructStencil  *stencil;
   hypre_Index          *stencil_shape;
   HYPRE_Int             stencil_size;
   HYPRE_Int             num_values;
   HYPRE_Int            *symm_elements;
   HYPRE_Int             constant_coefficient;
   HYPRE_Int            *num_ghost;
   HYPRE_Int             extra_ghost[] = {0, 0, 0, 0, 0, 0};
   hypre_BoxArray       *data_space;
   hypre_BoxArray       *boxes;
   hypre_Box            *box;
   hypre_Box            *data_box;
   HYPRE_Int           **data_indices;
   HYPRE_Int             data_size;
   HYPRE_Int             data_box_volume;
   HYPRE_Int             i, j, d;

   grid = hypre_StructMatrixGrid(matrix);
   ndim = hypre_StructMatrixDim(matrix);

   /*-----------------------------------------------------------------------
    * Set up stencil and num_values:
    *
    * If the matrix is symmetric, then the stencil is a "symmetrized"
    * version of the user's stencil.  If the matrix is not symmetric,
    * then the stencil is the same as the user's stencil.
    *
    * The `symm_elements' array is used to determine what data is
    * explicitely stored (symm_elements[i] < 0) and what data does is
    * not explicitely stored (symm_elements[i] >= 0), but is instead
    * stored as the transpose coefficient at a neighboring grid point.
    *-----------------------------------------------------------------------*/

   if (hypre_StructMatrixStencil(matrix) == NULL)
   {
      user_stencil = hypre_StructMatrixUserStencil(matrix);

      if (hypre_StructMatrixSymmetric(matrix))
      {
         /* store only symmetric stencil entry data */
         hypre_StructStencilSymmetrize(user_stencil, &stencil, &symm_elements);
         num_values = ( hypre_StructStencilSize(stencil) + 1 ) / 2;
      }
      else
      {
         /* store all stencil entry data */
         stencil = hypre_StructStencilRef(user_stencil);
         num_values = hypre_StructStencilSize(stencil);
         /* all entries explicitly stored: every symm_elements[i] = -1 */
         symm_elements = hypre_TAlloc(HYPRE_Int, num_values);
         for (i = 0; i < num_values; i++)
         {
            symm_elements[i] = -1;
         }
      }

      hypre_StructMatrixStencil(matrix)      = stencil;
      hypre_StructMatrixSymmElements(matrix) = symm_elements;
      hypre_StructMatrixNumValues(matrix)    = num_values;
   }

   /*-----------------------------------------------------------------------
    * Set ghost-layer size for symmetric storage
    *   - All stencil coeffs are to be available at each point in the
    *     grid, as well as in the user-specified ghost layer.
    *-----------------------------------------------------------------------*/

   num_ghost     = hypre_StructMatrixNumGhost(matrix);
   stencil       = hypre_StructMatrixStencil(matrix);
   stencil_shape = hypre_StructStencilShape(stencil);
   stencil_size  = hypre_StructStencilSize(stencil);
   symm_elements = hypre_StructMatrixSymmElements(matrix);

   /* entries stored at a transposed neighbor widen the needed ghost layer
      by the extent of their stencil offset in each direction */
   for (i = 0; i < stencil_size; i++)
   {
      if (symm_elements[i] >= 0)
      {
         for (d = 0; d < ndim; d++)
         {
            extra_ghost[2*d] =
               hypre_max(extra_ghost[2*d], -hypre_IndexD(stencil_shape[i], d));
            extra_ghost[2*d + 1] =
               hypre_max(extra_ghost[2*d + 1], hypre_IndexD(stencil_shape[i], d));
         }
      }
   }

   for (d = 0; d < ndim; d++)
   {
      num_ghost[2*d]     += extra_ghost[2*d];
      num_ghost[2*d + 1] += extra_ghost[2*d + 1];
   }

   /*-----------------------------------------------------------------------
    * Set up data_space: each grid box grown by the ghost layer
    *-----------------------------------------------------------------------*/

   if (hypre_StructMatrixDataSpace(matrix) == NULL)
   {
      boxes = hypre_StructGridBoxes(grid);
      data_space = hypre_BoxArrayCreate(hypre_BoxArraySize(boxes));

      hypre_ForBoxI(i, boxes)
      {
         box = hypre_BoxArrayBox(boxes, i);
         data_box = hypre_BoxArrayBox(data_space, i);

         hypre_CopyBox(box, data_box);
         for (d = 0; d < 3; d++)
         {
            hypre_BoxIMinD(data_box, d) -= num_ghost[2*d];
            hypre_BoxIMaxD(data_box, d) += num_ghost[2*d + 1];
         }
      }

      hypre_StructMatrixDataSpace(matrix) = data_space;
   }

   /*-----------------------------------------------------------------------
    * Set up data_indices array and data-size:
    * data_indices[i][j] is the offset (into the value array) of stencil
    * entry j on data box i.  Layout depends on constant_coefficient:
    *   0 - all coefficients variable (one value per grid point)
    *   1 - all coefficients constant (one value per stencil entry)
    *   2 - constant off-diagonal, variable diagonal
    *-----------------------------------------------------------------------*/

   if (hypre_StructMatrixDataIndices(matrix) == NULL)
   {
      data_space = hypre_StructMatrixDataSpace(matrix);
      data_indices = hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(data_space));
      constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix);

      data_size = 0;
      if ( constant_coefficient==0 )
      {
         hypre_ForBoxI(i, data_space)
         {
            data_box = hypre_BoxArrayBox(data_space, i);
            data_box_volume  = hypre_BoxVolume(data_box);

            data_indices[i] = hypre_CTAlloc(HYPRE_Int, stencil_size);

            /* set pointers for "stored" coefficients */
            for (j = 0; j < stencil_size; j++)
            {
               if (symm_elements[j] < 0)
               {
                  data_indices[i][j] = data_size;
                  data_size += data_box_volume;
               }
            }

            /* set pointers for "symmetric" coefficients */
            for (j = 0; j < stencil_size; j++)
            {
               if (symm_elements[j] >= 0)
               {
                  data_indices[i][j] = data_indices[i][symm_elements[j]] +
                     hypre_BoxOffsetDistance(data_box, stencil_shape[j]);
               }
            }
         }
      }
      else if ( constant_coefficient==1 )
      {
         hypre_ForBoxI(i, data_space)
         {
            data_box = hypre_BoxArrayBox(data_space, i);
            data_box_volume  = hypre_BoxVolume(data_box);

            data_indices[i] = hypre_CTAlloc(HYPRE_Int, stencil_size);

            /* set pointers for "stored" coefficients */
            for (j = 0; j < stencil_size; j++)
            {
               if (symm_elements[j] < 0)
               {
                  data_indices[i][j] = data_size;
                  ++data_size;
               }
            }

            /* set pointers for "symmetric" coefficients */
            for (j = 0; j < stencil_size; j++)
            {
               if (symm_elements[j] >= 0)
               {
                  data_indices[i][j] = data_indices[i][symm_elements[j]];
               }
            }
         }
      }
      else
      {
         hypre_assert( constant_coefficient == 2 );
         data_size += stencil_size;  /* all constant coeffs at the beginning */
         /* ... this allocates a little more space than is absolutely necessary */
         hypre_ForBoxI(i, data_space)
         {
            data_box = hypre_BoxArrayBox(data_space, i);
            data_box_volume  = hypre_BoxVolume(data_box);

            data_indices[i] = hypre_CTAlloc(HYPRE_Int, stencil_size);

            /* set pointers for "stored" coefficients */
            for (j = 0; j < stencil_size; j++)
            {
               if (symm_elements[j] < 0)
               {
                  if (
                     hypre_IndexX(stencil_shape[j])==0 &&
                     hypre_IndexY(stencil_shape[j])==0 &&
                     hypre_IndexZ(stencil_shape[j])==0 )  /* diagonal, variable
                                                           * coefficient */
                  {
                     data_indices[i][j] = data_size;
                     data_size += data_box_volume;
                  }
                  else /* off-diagonal, constant coefficient */
                  {
                     data_indices[i][j] = j;
                  }
               }
            }

            /* set pointers for "symmetric" coefficients */
            for (j = 0; j < stencil_size; j++)
            {
               if (symm_elements[j] >= 0)
               {
                  if (
                     hypre_IndexX(stencil_shape[j])==0 &&
                     hypre_IndexY(stencil_shape[j])==0 &&
                     hypre_IndexZ(stencil_shape[j])==0 )  /* diagonal, variable
                                                           * coefficient */
                  {
                     data_indices[i][j] = data_indices[i][symm_elements[j]] +
                        hypre_BoxOffsetDistance(data_box, stencil_shape[j]);
                  }
                  else /* off-diagonal, constant coefficient */
                  {
                     data_indices[i][j] = data_indices[i][symm_elements[j]];
                  }
               }
            }
         }
      }

      hypre_StructMatrixDataIndices(matrix) = data_indices;
      hypre_StructMatrixDataSize(matrix)    = data_size;
   }

   /*-----------------------------------------------------------------------
    * Set total number of nonzero coefficients
    * For constant coefficients, this is unrelated to the amount of data
    * actually stored.
    *-----------------------------------------------------------------------*/

   hypre_StructMatrixGlobalSize(matrix) =
      hypre_StructGridGlobalSize(grid) * stencil_size;

   /*-----------------------------------------------------------------------
    * Return
    *-----------------------------------------------------------------------*/

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixInitializeData
*--------------------------------------------------------------------------*/
/* Attach caller-owned value storage to the matrix; DataAlloced = 0 records
 * that Destroy must not free it. */
HYPRE_Int
hypre_StructMatrixInitializeData( hypre_StructMatrix *matrix,
                                  double             *data   )
{
   hypre_StructMatrixData(matrix)        = data;
   hypre_StructMatrixDataAlloced(matrix) = 0;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixInitialize
*--------------------------------------------------------------------------*/
/* Fully initialize the matrix: set up the shell, then allocate a zeroed
 * value array of the computed size and attach it.  DataAlloced is reset to 1
 * (InitializeData clears it) so Destroy will free this storage.
 *
 * Fix: removed a dead store -- the old code first read
 * hypre_StructMatrixData(matrix) into `data' and immediately overwrote it
 * with the fresh allocation. */
HYPRE_Int
hypre_StructMatrixInitialize( hypre_StructMatrix *matrix )
{
   double *data;

   hypre_StructMatrixInitializeShell(matrix);

   data = hypre_SharedCTAlloc(double, hypre_StructMatrixDataSize(matrix));
   hypre_StructMatrixInitializeData(matrix, data);
   hypre_StructMatrixDataAlloced(matrix) = 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* should not be called to set a constant-coefficient part of the matrix,
* call hypre_StructMatrixSetConstantValues instead
*--------------------------------------------------------------------------*/
/* Set/add/get the stencil coefficients at a single grid point, in every box
 * of the chosen range (all boxes if boxnum < 0) that contains grid_index.
 * outside > 0 searches the ghost-extended data space, else the grid boxes.
 * Only explicitly stored entries (symm_elements < 0) are touched. */
HYPRE_Int
hypre_StructMatrixSetValues( hypre_StructMatrix *matrix,
                             hypre_Index         grid_index,
                             HYPRE_Int           num_stencil_indices,
                             HYPRE_Int          *stencil_indices,
                             double             *values,
                             HYPRE_Int           action,
                             HYPRE_Int           boxnum,
                             HYPRE_Int           outside )
{
   hypre_BoxArray      *grid_boxes;
   hypre_Box           *grid_box;
   hypre_Index          center_index;
   hypre_StructStencil *stencil;
   HYPRE_Int            center_rank;
   HYPRE_Int           *symm_elements;
   HYPRE_Int            constant_coefficient;

   double              *matp;

   HYPRE_Int            i, s, istart, istop;

   /*-----------------------------------------------------------------------
    * Initialize some things
    *-----------------------------------------------------------------------*/

   constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix);
   symm_elements = hypre_StructMatrixSymmElements(matrix);

   if (outside > 0)
   {
      grid_boxes = hypre_StructMatrixDataSpace(matrix);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix));
   }

   if (boxnum < 0)
   {
      istart = 0;
      istop  = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop  = istart + 1;
   }

   /*-----------------------------------------------------------------------
    * Set the matrix coefficients
    *-----------------------------------------------------------------------*/

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);

      /* skip boxes that do not contain grid_index */
      if ((hypre_IndexX(grid_index) >= hypre_BoxIMinX(grid_box)) &&
          (hypre_IndexX(grid_index) <= hypre_BoxIMaxX(grid_box)) &&
          (hypre_IndexY(grid_index) >= hypre_BoxIMinY(grid_box)) &&
          (hypre_IndexY(grid_index) <= hypre_BoxIMaxY(grid_box)) &&
          (hypre_IndexZ(grid_index) >= hypre_BoxIMinZ(grid_box)) &&
          (hypre_IndexZ(grid_index) <= hypre_BoxIMaxZ(grid_box)) )
      {
         /* center_rank is needed (and computed) only in the cc==2 case; the
            short-circuit in the condition below guards its use */
         if ( constant_coefficient==2 )
         {
            hypre_SetIndex(center_index, 0, 0, 0);
            stencil = hypre_StructMatrixStencil(matrix);
            center_rank = hypre_StructStencilElementRank( stencil, center_index );
         }

         for (s = 0; s < num_stencil_indices; s++)
         {
            /* only set stored stencil values */
            if (symm_elements[stencil_indices[s]] < 0)
            {
               /* constant-coefficient entries should go through
                  SetConstantValues; record an error but still resolve a
                  data pointer so the action below stays well-defined */
               if ( (constant_coefficient==1) ||
                    (constant_coefficient==2 && stencil_indices[s]!=center_rank ))
               {
                  /* call SetConstantValues instead */
                  hypre_error(HYPRE_ERROR_GENERIC);
                  matp = hypre_StructMatrixBoxData(matrix, i, stencil_indices[s]);
               }
               else /* variable coefficient, constant_coefficient=0 */
               {
                  matp = hypre_StructMatrixBoxDataValue(
                     matrix, i, stencil_indices[s], grid_index);
               }

               /* action > 0: add-to; action == 0: set; action < 0: get */
               if (action > 0)
               {
                  *matp += values[s];
               }
               else if (action > -1)
               {
                  *matp = values[s];
               }
               else /* action < 0 */
               {
                  values[s] = *matp;
               }
            }
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*
* should not be called to set a constant-coefficient part of the matrix,
* call hypre_StructMatrixSetConstantValues instead
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_StructMatrixSetBoxValues( hypre_StructMatrix *matrix,
hypre_Box *set_box,
hypre_Box *value_box,
HYPRE_Int num_stencil_indices,
HYPRE_Int *stencil_indices,
double *values,
HYPRE_Int action,
HYPRE_Int boxnum,
HYPRE_Int outside )
{
hypre_BoxArray *grid_boxes;
hypre_Box *grid_box;
hypre_Box *int_box;
hypre_Index center_index;
hypre_StructStencil *stencil;
HYPRE_Int center_rank;
HYPRE_Int *symm_elements;
hypre_BoxArray *data_space;
hypre_Box *data_box;
hypre_IndexRef data_start;
hypre_Index data_stride;
HYPRE_Int datai;
double *datap;
HYPRE_Int constant_coefficient;
hypre_Box *dval_box;
hypre_Index dval_start;
hypre_Index dval_stride;
HYPRE_Int dvali;
hypre_Index loop_size;
HYPRE_Int i, s, istart, istop;
/*-----------------------------------------------------------------------
* Initialize some things
*-----------------------------------------------------------------------*/
constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix);
symm_elements = hypre_StructMatrixSymmElements(matrix);
/* outside > 0: also touch values in the ghost region, so loop over the
(larger) data space instead of just the grid boxes */
if (outside > 0)
{
grid_boxes = hypre_StructMatrixDataSpace(matrix);
}
else
{
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix));
}
data_space = hypre_StructMatrixDataSpace(matrix);
/* boxnum < 0 means all boxes; otherwise restrict to the one box */
if (boxnum < 0)
{
istart = 0;
istop = hypre_BoxArraySize(grid_boxes);
}
else
{
istart = boxnum;
istop = istart + 1;
}
/*-----------------------------------------------------------------------
* Set the matrix coefficients
*-----------------------------------------------------------------------*/
hypre_SetIndex(data_stride, 1, 1, 1);
int_box = hypre_BoxCreate();
/* dval_box indexes the caller's "values" array: each grid point of
value_box owns a contiguous run of num_stencil_indices doubles, so the
x-extent of the box is stretched by that factor */
dval_box = hypre_BoxDuplicate(value_box);
hypre_BoxIMinD(dval_box, 0) *= num_stencil_indices;
hypre_BoxIMaxD(dval_box, 0) *= num_stencil_indices;
hypre_BoxIMaxD(dval_box, 0) += num_stencil_indices - 1;
hypre_SetIndex(dval_stride, num_stencil_indices, 1, 1);
for (i = istart; i < istop; i++)
{
grid_box = hypre_BoxArrayBox(grid_boxes, i);
data_box = hypre_BoxArrayBox(data_space, i);
hypre_IntersectBoxes(set_box, grid_box, int_box);
/* if there was an intersection */
if (hypre_BoxVolume(int_box))
{
data_start = hypre_BoxIMin(int_box);
hypre_CopyIndex(data_start, dval_start);
/* position dval_start at the first stencil entry of this point */
hypre_IndexD(dval_start, 0) *= num_stencil_indices;
if ( constant_coefficient==2 )
{
/* for constant_coefficient==2 only the center (diagonal) entry
varies in space, so we need its rank to tell the cases apart */
hypre_SetIndex(center_index, 0, 0, 0);
stencil = hypre_StructMatrixStencil(matrix);
center_rank = hypre_StructStencilElementRank( stencil, center_index );
}
for (s = 0; s < num_stencil_indices; s++)
{
/* only set stored stencil values */
if (symm_elements[stencil_indices[s]] < 0)
{
datap = hypre_StructMatrixBoxData(matrix, i, stencil_indices[s]);
if ( (constant_coefficient==1) ||
(constant_coefficient==2 && stencil_indices[s]!=center_rank ))
/* datap has only one data point for a given i and s */
{
/* should have called SetConstantValues */
hypre_error(HYPRE_ERROR_GENERIC);
hypre_BoxGetSize(int_box, loop_size);
if (action > 0)
{
/* add-to */
datai = hypre_CCBoxIndexRank(data_box,data_start);
dvali = hypre_BoxIndexRank(dval_box,dval_start);
datap[datai] += values[dvali];
}
else if (action > -1)
{
/* set */
datai = hypre_CCBoxIndexRank(data_box,data_start);
dvali = hypre_BoxIndexRank(dval_box,dval_start);
datap[datai] = values[dvali];
}
else
{
/* action < 0: read back; action == -2 also zeroes the entry */
datai = hypre_CCBoxIndexRank(data_box,data_start);
dvali = hypre_BoxIndexRank(dval_box,dval_start);
values[dvali] = datap[datai];
if (action == -2)
{
datap[datai] = 0;
}
}
}
else /* variable coefficient: constant_coefficient==0
or diagonal with constant_coefficient==2 */
{
hypre_BoxGetSize(int_box, loop_size);
if (action > 0)
{
/* add-to over the whole intersection box */
hypre_BoxLoop2Begin(hypre_StructMatrixDim(matrix), loop_size,
data_box,data_start,data_stride,datai,
dval_box,dval_start,dval_stride,dvali);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(datai, dvali)
{
datap[datai] += values[dvali];
}
hypre_BoxLoop2End(datai, dvali);
}
else if (action > -1)
{
/* set over the whole intersection box */
hypre_BoxLoop2Begin(hypre_StructMatrixDim(matrix), loop_size,
data_box,data_start,data_stride,datai,
dval_box,dval_start,dval_stride,dvali);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(datai, dvali)
{
datap[datai] = values[dvali];
}
hypre_BoxLoop2End(datai, dvali);
}
else
{
/* get (and zero out when action == -2) */
hypre_BoxLoop2Begin(hypre_StructMatrixDim(matrix), loop_size,
data_box,data_start,data_stride,datai,
dval_box,dval_start,dval_stride,dvali);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(datai, dvali)
{
values[dvali] = datap[datai];
if (action == -2)
{
datap[datai] = 0;
}
}
hypre_BoxLoop2End(datai, dvali);
}
}
} /* end if (symm_elements) */
/* advance to the next stencil entry's slot within this point's run */
hypre_IndexD(dval_start, 0) ++;
}
}
}
hypre_BoxDestroy(int_box);
hypre_BoxDestroy(dval_box);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out (not implemented, just gets values)
* should be called to set a constant-coefficient part of the matrix
*--------------------------------------------------------------------------*/
/* Set/add/get the constant-coefficient part of the matrix.
 * action > 0: add to values; action == 0: set; action < 0: get
 * (action == -2 "get and zero" is not implemented here; it just gets).
 * Calling this for variable (non-constant) entries is an error; we record
 * it via hypre_error and fall back to hypre_StructMatrixSetBoxValues. */
HYPRE_Int
hypre_StructMatrixSetConstantValues( hypre_StructMatrix *matrix,
                                     HYPRE_Int num_stencil_indices,
                                     HYPRE_Int *stencil_indices,
                                     double *values,
                                     HYPRE_Int action )
{
   hypre_BoxArray *boxes;
   hypre_Box *box;
   hypre_Index center_index;
   hypre_StructStencil *stencil;
   HYPRE_Int center_rank;
   HYPRE_Int constant_coefficient;
   double *matp;
   HYPRE_Int i, s;

   boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix));
   constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix);

   if ( constant_coefficient==1 )
   {
      /* every entry is constant: one stored value per stencil entry */
      hypre_ForBoxI(i, boxes)
      {
         box = hypre_BoxArrayBox(boxes, i);
         if (action > 0)
         {
            /* add-to */
            for (s = 0; s < num_stencil_indices; s++)
            {
               matp = hypre_StructMatrixBoxData(matrix, i,
                                                stencil_indices[s]);
               *matp += values[s];
            }
         }
         else if (action > -1)
         {
            /* set */
            for (s = 0; s < num_stencil_indices; s++)
            {
               matp = hypre_StructMatrixBoxData(matrix, i,
                                                stencil_indices[s]);
               *matp = values[s];
            }
         }
         else /* action < 0 */
         {
            /* get */
            for (s = 0; s < num_stencil_indices; s++)
            {
               matp = hypre_StructMatrixBoxData(matrix, i,
                                                stencil_indices[s]);
               values[s] = *matp;
            }
         }
      }
   }
   else if ( constant_coefficient==2 )
   {
      /* only the center (diagonal) entry is variable in space */
      hypre_SetIndex(center_index, 0, 0, 0);
      stencil = hypre_StructMatrixStencil(matrix);
      center_rank = hypre_StructStencilElementRank( stencil, center_index );
      if ( action > 0 )
      {
         for (s = 0; s < num_stencil_indices; s++)
         {
            if ( stencil_indices[s] == center_rank )
            {  /* center (diagonal), like constant_coefficient==0
                  We consider it an error, but do the best we can. */
               hypre_error(HYPRE_ERROR_GENERIC);
               hypre_ForBoxI(i, boxes)
               {
                  box = hypre_BoxArrayBox(boxes, i);
                  hypre_StructMatrixSetBoxValues( matrix, box, box,
                                                  num_stencil_indices,
                                                  stencil_indices,
                                                  values, action, -1, 0 );
               }
            }
            else
            {  /* non-center, like constant_coefficient==1 */
               matp = hypre_StructMatrixBoxData(matrix, 0,
                                                stencil_indices[s]);
               *matp += values[s];
            }
         }
      }
      else if ( action > -1 )
      {
         for (s = 0; s < num_stencil_indices; s++)
         {
            if ( stencil_indices[s] == center_rank )
            {  /* center (diagonal), like constant_coefficient==0
                  We consider it an error, but do the best we can. */
               hypre_error(HYPRE_ERROR_GENERIC);
               hypre_ForBoxI(i, boxes)
               {
                  box = hypre_BoxArrayBox(boxes, i);
                  hypre_StructMatrixSetBoxValues( matrix, box, box,
                                                  num_stencil_indices,
                                                  stencil_indices,
                                                  values, action, -1, 0 );
               }
            }
            else
            {  /* non-center, like constant_coefficient==1 */
               matp = hypre_StructMatrixBoxData(matrix, 0,
                                                stencil_indices[s]);
               /* BUG FIX: action==0 means "set", so assign rather than
                  accumulate (the add-to case is the action>0 branch above;
                  compare the constant_coefficient==1 set branch) */
               *matp = values[s];
            }
         }
      }
      else /* action<0 */
      {
         for (s = 0; s < num_stencil_indices; s++)
         {
            if ( stencil_indices[s] == center_rank )
            {  /* center (diagonal), like constant_coefficient==0
                  We consider it an error, but do the best we can. */
               hypre_error(HYPRE_ERROR_GENERIC);
               hypre_ForBoxI(i, boxes)
               {
                  box = hypre_BoxArrayBox(boxes, i);
                  /* pass -1, not action: action==-2 (get-and-zero) is not
                     implemented for the constant part */
                  hypre_StructMatrixSetBoxValues( matrix, box, box,
                                                  num_stencil_indices,
                                                  stencil_indices,
                                                  values, -1, -1, 0 );
               }
            }
            else
            {  /* non-center, like constant_coefficient==1 */
               matp = hypre_StructMatrixBoxData(matrix, 0,
                                                stencil_indices[s]);
               values[s] = *matp;
            }
         }
      }
   }
   else /* constant_coefficient==0 */
   {
      /* We consider this an error, but do the best we can. */
      hypre_error(HYPRE_ERROR_GENERIC);
      hypre_ForBoxI(i, boxes)
      {
         box = hypre_BoxArrayBox(boxes, i);
         hypre_StructMatrixSetBoxValues( matrix, box, box,
                                         num_stencil_indices, stencil_indices,
                                         values, action, -1, 0 );
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (outside > 0): clear values possibly outside of the grid extents
* (outside = 0): clear values only inside the grid extents
*--------------------------------------------------------------------------*/
/* Zero the given stencil coefficients at the single point grid_index.
 * outside > 0 scans the data space (incl. ghost zones), otherwise just
 * the grid boxes; boxnum < 0 scans all boxes, otherwise only that box. */
HYPRE_Int
hypre_StructMatrixClearValues( hypre_StructMatrix *matrix,
                               hypre_Index grid_index,
                               HYPRE_Int num_stencil_indices,
                               HYPRE_Int *stencil_indices,
                               HYPRE_Int boxnum,
                               HYPRE_Int outside )
{
   hypre_BoxArray *boxes;
   hypre_Box      *box;
   double         *coeff;
   HYPRE_Int       b, e, first, last;

   /* choose which set of boxes to scan */
   boxes = (outside > 0) ?
      hypre_StructMatrixDataSpace(matrix) :
      hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix));

   /* restrict to one box when requested, otherwise scan them all */
   if (boxnum < 0)
   {
      first = 0;
      last  = hypre_BoxArraySize(boxes);
   }
   else
   {
      first = boxnum;
      last  = boxnum + 1;
   }

   for (b = first; b < last; b++)
   {
      box = hypre_BoxArrayBox(boxes, b);

      /* skip boxes that do not contain grid_index */
      if ((hypre_IndexX(grid_index) < hypre_BoxIMinX(box)) ||
          (hypre_IndexX(grid_index) > hypre_BoxIMaxX(box)) ||
          (hypre_IndexY(grid_index) < hypre_BoxIMinY(box)) ||
          (hypre_IndexY(grid_index) > hypre_BoxIMaxY(box)) ||
          (hypre_IndexZ(grid_index) < hypre_BoxIMinZ(box)) ||
          (hypre_IndexZ(grid_index) > hypre_BoxIMaxZ(box)))
      {
         continue;
      }

      /* zero each requested stencil coefficient at this point */
      for (e = 0; e < num_stencil_indices; e++)
      {
         coeff = hypre_StructMatrixBoxDataValue(matrix, b, stencil_indices[e],
                                                grid_index);
         *coeff = 0.0;
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (outside > 0): clear values possibly outside of the grid extents
* (outside = 0): clear values only inside the grid extents
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_StructMatrixClearBoxValues( hypre_StructMatrix *matrix,
hypre_Box *clear_box,
HYPRE_Int num_stencil_indices,
HYPRE_Int *stencil_indices,
HYPRE_Int boxnum,
HYPRE_Int outside )
{
hypre_BoxArray *grid_boxes;
hypre_Box *grid_box;
hypre_Box *int_box;
HYPRE_Int *symm_elements;
hypre_BoxArray *data_space;
hypre_Box *data_box;
hypre_IndexRef data_start;
hypre_Index data_stride;
HYPRE_Int datai;
double *datap;
hypre_Index loop_size;
HYPRE_Int i, s, istart, istop;
/*-----------------------------------------------------------------------
* Initialize some things
*-----------------------------------------------------------------------*/
/* outside > 0: also clear in the ghost region (loop over the data space) */
if (outside > 0)
{
grid_boxes = hypre_StructMatrixDataSpace(matrix);
}
else
{
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix));
}
data_space = hypre_StructMatrixDataSpace(matrix);
/* boxnum < 0 means all boxes; otherwise restrict to that one box */
if (boxnum < 0)
{
istart = 0;
istop = hypre_BoxArraySize(grid_boxes);
}
else
{
istart = boxnum;
istop = istart + 1;
}
/*-----------------------------------------------------------------------
* Clear the matrix coefficients
*-----------------------------------------------------------------------*/
hypre_SetIndex(data_stride, 1, 1, 1);
symm_elements = hypre_StructMatrixSymmElements(matrix);
int_box = hypre_BoxCreate();
for (i = istart; i < istop; i++)
{
grid_box = hypre_BoxArrayBox(grid_boxes, i);
data_box = hypre_BoxArrayBox(data_space, i);
hypre_IntersectBoxes(clear_box, grid_box, int_box);
/* if there was an intersection */
if (hypre_BoxVolume(int_box))
{
data_start = hypre_BoxIMin(int_box);
for (s = 0; s < num_stencil_indices; s++)
{
/* only clear stencil entries that are explicitly stored */
if (symm_elements[stencil_indices[s]] < 0)
{
datap = hypre_StructMatrixBoxData(matrix, i,
stencil_indices[s]);
/* zero every coefficient of this stencil entry over the
intersection box */
hypre_BoxGetSize(int_box, loop_size);
hypre_BoxLoop1Begin(hypre_StructMatrixDim(matrix), loop_size,
data_box,data_start,data_stride,datai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(datai)
{
datap[datai] = 0.0;
}
hypre_BoxLoop1End(datai);
}
}
}
}
hypre_BoxDestroy(int_box);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Finalize the matrix: set boundary ghost zones to the identity, build the
 * communication package if needed, and exchange ghost data with neighbors. */
HYPRE_Int
hypre_StructMatrixAssemble( hypre_StructMatrix *matrix )
{
HYPRE_Int *num_ghost = hypre_StructMatrixNumGhost(matrix);
HYPRE_Int comm_num_values, mat_num_values, constant_coefficient;
HYPRE_Int stencil_size;
hypre_StructStencil *stencil;
hypre_CommInfo *comm_info;
hypre_CommPkg *comm_pkg;
hypre_CommHandle *comm_handle;
HYPRE_Int data_initial_offset = 0;
double *matrix_data = hypre_StructMatrixData(matrix);
double *matrix_data_comm = matrix_data;
/* BEGIN - variables for ghost layer identity code below */
hypre_StructGrid *grid;
hypre_BoxArray *boxes;
hypre_BoxManager *boxman;
hypre_BoxArray *data_space;
hypre_BoxArrayArray *boundary_boxes;
hypre_BoxArray *boundary_box_a;
hypre_BoxArray *entry_box_a;
hypre_BoxArray *tmp_box_a;
hypre_Box *data_box;
hypre_Box *boundary_box;
hypre_Box *entry_box;
hypre_BoxManEntry **entries;
hypre_IndexRef periodic;
hypre_Index loop_size;
hypre_Index index;
hypre_IndexRef start;
hypre_Index stride;
double *datap;
HYPRE_Int i, j, ei, datai;
HYPRE_Int num_entries;
/* End - variables for ghost layer identity code below */
constant_coefficient = hypre_StructMatrixConstantCoefficient( matrix );
/*-----------------------------------------------------------------------
* Set ghost zones along the domain boundary to the identity to enable code
* simplifications elsewhere in hypre (e.g., CyclicReduction).
*
* Intersect each data box with the BoxMan to get neighbors, then subtract
* the neighbors from the box to get the boundary boxes.
*-----------------------------------------------------------------------*/
if ( constant_coefficient!=1 )
{
data_space = hypre_StructMatrixDataSpace(matrix);
grid = hypre_StructMatrixGrid(matrix);
boxes = hypre_StructGridBoxes(grid);
boxman = hypre_StructGridBoxMan(grid);
periodic = hypre_StructGridPeriodic(grid);
boundary_boxes = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(data_space));
entry_box_a = hypre_BoxArrayCreate(0);
tmp_box_a = hypre_BoxArrayCreate(0);
/* for each data box, compute the part not covered by any neighbor:
that remainder is the domain-boundary ghost layer */
hypre_ForBoxI(i, data_space)
{
/* copy data box to boundary_box_a */
boundary_box_a = hypre_BoxArrayArrayBoxArray(boundary_boxes, i);
hypre_BoxArraySetSize(boundary_box_a, 1);
boundary_box = hypre_BoxArrayBox(boundary_box_a, 0);
hypre_CopyBox(hypre_BoxArrayBox(data_space, i), boundary_box);
hypre_BoxManIntersect(boxman,
hypre_BoxIMin(boundary_box),
hypre_BoxIMax(boundary_box),
&entries , &num_entries);
/* put neighbor boxes into entry_box_a */
hypre_BoxArraySetSize(entry_box_a, num_entries);
for (ei = 0; ei < num_entries; ei++)
{
entry_box = hypre_BoxArrayBox(entry_box_a, ei);
hypre_BoxManEntryGetExtents(entries[ei],
hypre_BoxIMin(entry_box),
hypre_BoxIMax(entry_box));
}
hypre_TFree(entries);
/* subtract neighbor boxes (entry_box_a) from data box (boundary_box_a) */
hypre_SubtractBoxArrays(boundary_box_a, entry_box_a, tmp_box_a);
}
hypre_BoxArrayDestroy(entry_box_a);
hypre_BoxArrayDestroy(tmp_box_a);
/* set boundary ghost zones to the identity equation */
hypre_SetIndex(index, 0, 0, 0);
hypre_SetIndex(stride, 1, 1, 1);
data_space = hypre_StructMatrixDataSpace(matrix);
hypre_ForBoxI(i, data_space)
{
/* pointer to the diagonal (offset (0,0,0)) coefficients of box i;
may be NULL, in which case there is nothing to set */
datap = hypre_StructMatrixExtractPointerByIndex(matrix, i, index);
if (datap)
{
data_box = hypre_BoxArrayBox(data_space, i);
boundary_box_a = hypre_BoxArrayArrayBoxArray(boundary_boxes, i);
hypre_ForBoxI(j, boundary_box_a)
{
boundary_box = hypre_BoxArrayBox(boundary_box_a, j);
start = hypre_BoxIMin(boundary_box);
hypre_BoxGetSize(boundary_box, loop_size);
hypre_BoxLoop1Begin(hypre_StructMatrixDim(matrix), loop_size,
data_box, start, stride, datai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(datai)
{
datap[datai] = 1.0;
}
hypre_BoxLoop1End(datai);
}
}
}
hypre_BoxArrayArrayDestroy(boundary_boxes);
}
/*-----------------------------------------------------------------------
* If the CommPkg has not been set up, set it up
*
* The matrix data array is assumed to have two segments - an initial
* segment of data constant over all space, followed by a segment with
* comm_num_values matrix entries for each mesh element. The mesh-dependent
* data is, of course, the only part relevent to communications.
* For constant_coefficient==0, all the data is mesh-dependent.
* For constant_coefficient==1, all data is constant.
* For constant_coefficient==2, both segments are non-null.
*-----------------------------------------------------------------------*/
mat_num_values = hypre_StructMatrixNumValues(matrix);
if ( constant_coefficient==0 )
{
comm_num_values = mat_num_values;
}
else if ( constant_coefficient==1 )
{
comm_num_values = 0;
}
else /* constant_coefficient==2 */
{
/* only the (variable) diagonal needs communicating; it starts right
after the stencil_size constant entries */
comm_num_values = 1;
stencil = hypre_StructMatrixStencil(matrix);
stencil_size = hypre_StructStencilSize(stencil);
data_initial_offset = stencil_size;
matrix_data_comm = &( matrix_data[data_initial_offset] );
}
comm_pkg = hypre_StructMatrixCommPkg(matrix);
if (!comm_pkg)
{
hypre_CreateCommInfoFromNumGhost(hypre_StructMatrixGrid(matrix),
num_ghost, &comm_info);
hypre_CommPkgCreate(comm_info,
hypre_StructMatrixDataSpace(matrix),
hypre_StructMatrixDataSpace(matrix),
comm_num_values, NULL, 0,
hypre_StructMatrixComm(matrix), &comm_pkg);
hypre_CommInfoDestroy(comm_info);
hypre_StructMatrixCommPkg(matrix) = comm_pkg;
}
/*-----------------------------------------------------------------------
* Update the ghost data
* This takes care of the communication needs of all known functions
* referencing the matrix.
*
* At present this is the only place where matrix data gets communicated.
* However, comm_pkg is kept as long as the matrix is, in case some
* future version hypre has a use for it - e.g. if the user replaces
* a matrix with a very similar one, we may not want to recompute comm_pkg.
*-----------------------------------------------------------------------*/
if ( constant_coefficient!=1 )
{
hypre_InitializeCommunication( comm_pkg,
matrix_data_comm,
matrix_data_comm, 0, 0,
&comm_handle );
hypre_FinalizeCommunication( comm_handle );
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixSetNumGhost
*--------------------------------------------------------------------------*/
/* Copy the caller's ghost-layer widths (lower/upper per dimension, so
 * 2*ndim entries) into the matrix. */
HYPRE_Int
hypre_StructMatrixSetNumGhost( hypre_StructMatrix *matrix,
                               HYPRE_Int *num_ghost )
{
   HYPRE_Int  d;
   HYPRE_Int  ndim = hypre_StructMatrixDim(matrix);
   HYPRE_Int *mat_num_ghost = hypre_StructMatrixNumGhost(matrix);

   for (d = 0; d < 2*ndim; d++)
   {
      mat_num_ghost[d] = num_ghost[d];
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixSetConstantCoefficient
* deprecated in user interface, in favor of SetConstantEntries.
* left here for internal use
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_StructMatrixSetConstantCoefficient( hypre_StructMatrix *matrix,
HYPRE_Int constant_coefficient )
{
/* Record the constant-coefficient mode on the matrix (0: none constant,
   1: all constant, 2: all but the diagonal constant).  No validation is
   done here; see hypre_StructMatrixSetConstantEntries. */
hypre_StructMatrixConstantCoefficient(matrix) = constant_coefficient;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixSetConstantEntries
* - nentries is the number of array entries
* - Each HYPRE_Int entries[i] is an index into the shape array of the stencil
* of the matrix
* In the present version, only three possibilites are recognized:
* - no entries constant (constant_coefficient==0)
* - all entries constant (constant_coefficient==1)
* - all but the diagonal entry constant (constant_coefficient==2)
* If something else is attempted, this function will return a nonzero error.
* In the present version, if this function is called more than once, only
* the last call will take effect.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_StructMatrixSetConstantEntries( hypre_StructMatrix *matrix,
                                                HYPRE_Int nentries,
                                                HYPRE_Int *entries )
{
   /* Classify the requested constant entries into one of the three
      supported constant_coefficient settings:
        0 : no entries constant
        1 : all entries constant
        2 : all but the diagonal entry constant
      Any other combination is recorded as a generic error, and we fall
      back to the closest legal setting.  Duplicates in "entries" are
      harmless: each stencil position is flagged and counted at most once.
   */
   hypre_StructStencil *stencil      = hypre_StructMatrixUserStencil(matrix);
   /* NOTE: this is the user stencil; the full stencil may not exist yet */
   HYPRE_Int            stencil_size = hypre_StructStencilSize(stencil);
   /* CTAlloc zero-initializes, so all flags start out "variable" */
   HYPRE_Int           *is_const     = hypre_CTAlloc(HYPRE_Int, stencil_size);
   HYPRE_Int            nconst       = 0;
   HYPRE_Int            constant_coefficient, diag_rank;
   hypre_Index          diag_index;
   HYPRE_Int            i;

   /* flag each distinct constant entry, counting it exactly once */
   for ( i=0; i<nentries; ++i )
   {
      if ( !is_const[ entries[i] ] )
      {
         is_const[ entries[i] ] = 1;
         ++nconst;
      }
   }

   if ( nconst<=0 )
   {
      constant_coefficient = 0;
   }
   else if ( nconst>=stencil_size )
   {
      constant_coefficient = 1;
   }
   else
   {
      hypre_SetIndex(diag_index, 0, 0, 0);
      diag_rank = hypre_StructStencilElementRank( stencil, diag_index );
      if ( is_const[diag_rank]==0 )
      {
         constant_coefficient = 2;
         if ( nconst!=(stencil_size-1) )
         {
            /* some off-diagonal entries were left variable: unsupported */
            hypre_error(HYPRE_ERROR_GENERIC);
         }
      }
      else
      {
         /* diagonal constant but not all entries: unsupported */
         constant_coefficient = 0;
         hypre_error(HYPRE_ERROR_GENERIC);
      }
   }

   hypre_StructMatrixSetConstantCoefficient( matrix, constant_coefficient );

   hypre_TFree(is_const);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Zero all stored matrix coefficients lying outside the grid boxes, i.e.
 * the ghost portion of each data box. */
HYPRE_Int
hypre_StructMatrixClearGhostValues( hypre_StructMatrix *matrix )
{
hypre_Box *m_data_box;
HYPRE_Int mi;
double *mp;
hypre_StructStencil *stencil;
HYPRE_Int *symm_elements;
hypre_BoxArray *boxes;
hypre_Box *box;
hypre_BoxArray *diff_boxes;
hypre_Box *diff_box;
hypre_Index loop_size;
hypre_IndexRef start;
hypre_Index unit_stride;
HYPRE_Int i, j, s;
/*-----------------------------------------------------------------------
* Set the matrix coefficients
*-----------------------------------------------------------------------*/
hypre_SetIndex(unit_stride, 1, 1, 1);
stencil = hypre_StructMatrixStencil(matrix);
symm_elements = hypre_StructMatrixSymmElements(matrix);
boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix));
diff_boxes = hypre_BoxArrayCreate(0);
hypre_ForBoxI(i, boxes)
{
box = hypre_BoxArrayBox(boxes, i);
m_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(matrix), i);
/* diff_boxes = data box minus grid box = the ghost region of box i */
hypre_BoxArraySetSize(diff_boxes, 0);
hypre_SubtractBoxes(m_data_box, box, diff_boxes);
for (s = 0; s < hypre_StructStencilSize(stencil); s++)
{
/* only clear stencil entries that are explicitly stored */
if (symm_elements[s] < 0)
{
mp = hypre_StructMatrixBoxData(matrix, i, s);
hypre_ForBoxI(j, diff_boxes)
{
diff_box = hypre_BoxArrayBox(diff_boxes, j);
start = hypre_BoxIMin(diff_box);
hypre_BoxGetSize(diff_box, loop_size);
hypre_BoxLoop1Begin(hypre_StructMatrixDim(matrix), loop_size,
m_data_box, start, unit_stride, mi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi ) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(mi)
{
mp[mi] = 0.0;
}
hypre_BoxLoop1End(mi);
}
}
}
}
hypre_BoxArrayDestroy(diff_boxes);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixPrint
*--------------------------------------------------------------------------*/
/* Write the matrix to one text file per MPI rank ("<filename>.<myid>").
 * all != 0 prints the full data space (incl. ghost zones); all == 0
 * prints only the grid extents.  The format is what
 * hypre_StructMatrixRead expects back. */
HYPRE_Int
hypre_StructMatrixPrint( const char *filename,
hypre_StructMatrix *matrix,
HYPRE_Int all )
{
FILE *file;
char new_filename[255];
hypre_StructGrid *grid;
hypre_BoxArray *boxes;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int stencil_size;
hypre_Index center_index;
HYPRE_Int num_values;
hypre_BoxArray *data_space;
HYPRE_Int *symm_elements;
HYPRE_Int i, j;
HYPRE_Int constant_coefficient;
HYPRE_Int center_rank;
HYPRE_Int myid;
constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix);
/*----------------------------------------
* Open file
*----------------------------------------*/
/* under pthreads, make sure we call the real MPI_Comm_rank, not the
per-thread wrapper the macro may have been redirected to */
#ifdef HYPRE_USE_PTHREADS
#if hypre_MPI_Comm_rank == hypre_thread_MPI_Comm_rank
#undef hypre_MPI_Comm_rank
#endif
#endif
hypre_MPI_Comm_rank(hypre_StructMatrixComm(matrix), &myid);
hypre_sprintf(new_filename, "%s.%05d", filename, myid);
if ((file = fopen(new_filename, "w")) == NULL)
{
hypre_printf("Error: can't open output file %s\n", new_filename);
exit(1);
}
/*----------------------------------------
* Print header info
*----------------------------------------*/
hypre_fprintf(file, "StructMatrix\n");
hypre_fprintf(file, "\nSymmetric: %d\n", hypre_StructMatrixSymmetric(matrix));
hypre_fprintf(file, "\nConstantCoefficient: %d\n",
hypre_StructMatrixConstantCoefficient(matrix));
/* print grid info */
hypre_fprintf(file, "\nGrid:\n");
grid = hypre_StructMatrixGrid(matrix);
hypre_StructGridPrint(file, grid);
/* print stencil info */
hypre_fprintf(file, "\nStencil:\n");
stencil = hypre_StructMatrixStencil(matrix);
stencil_shape = hypre_StructStencilShape(stencil);
num_values = hypre_StructMatrixNumValues(matrix);
symm_elements = hypre_StructMatrixSymmElements(matrix);
hypre_fprintf(file, "%d\n", num_values);
stencil_size = hypre_StructStencilSize(stencil);
/* only the explicitly stored (non-symmetric-copy) entries are printed,
renumbered consecutively by j */
j = 0;
for (i=0; i<stencil_size; i++)
{
if (symm_elements[i] < 0)
{
hypre_fprintf(file, "%d: %d %d %d\n", j++,
hypre_IndexX(stencil_shape[i]),
hypre_IndexY(stencil_shape[i]),
hypre_IndexZ(stencil_shape[i]));
}
}
/*----------------------------------------
* Print data
*----------------------------------------*/
data_space = hypre_StructMatrixDataSpace(matrix);
if (all)
boxes = data_space;
else
boxes = hypre_StructGridBoxes(grid);
hypre_fprintf(file, "\nData:\n");
/* the data layout differs per constant_coefficient mode, so each mode
has its own printer */
if ( constant_coefficient==1 )
{
hypre_PrintCCBoxArrayData(file, boxes, data_space, num_values,
hypre_StructMatrixData(matrix));
}
else if ( constant_coefficient==2 )
{
hypre_SetIndex(center_index, 0, 0, 0);
center_rank = hypre_StructStencilElementRank( stencil, center_index );
hypre_PrintCCVDBoxArrayData(file, boxes, data_space, num_values,
center_rank, stencil_size, symm_elements,
hypre_StructMatrixData(matrix));
}
else
{
hypre_PrintBoxArrayData(file, boxes, data_space, num_values,
hypre_StructMatrixData(matrix));
}
/*----------------------------------------
* Close file
*----------------------------------------*/
fflush(file);
fclose(file);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixMigrate
*--------------------------------------------------------------------------*/
/* Copy the coefficients of from_matrix into to_matrix, where the two
 * matrices live on (possibly) different grids/distributions but must have
 * the same stencil, number of values, and constant-coefficient mode.
 * Constant data (constant_coefficient==1, or the constant segment when
 * ==2) needs no communication; only mesh-dependent data is exchanged. */
HYPRE_Int
hypre_StructMatrixMigrate( hypre_StructMatrix *from_matrix,
                           hypre_StructMatrix *to_matrix )
{
   hypre_CommInfo *comm_info;
   hypre_CommPkg *comm_pkg;
   hypre_CommHandle *comm_handle;
   HYPRE_Int constant_coefficient, comm_num_values;
   HYPRE_Int stencil_size, mat_num_values;
   hypre_StructStencil *stencil;
   HYPRE_Int data_initial_offset = 0;
   double *matrix_data_from = hypre_StructMatrixData(from_matrix);
   double *matrix_data_to = hypre_StructMatrixData(to_matrix);
   double *matrix_data_comm_from = matrix_data_from;
   double *matrix_data_comm_to = matrix_data_to;

   /*------------------------------------------------------
    * Set up hypre_CommPkg
    *------------------------------------------------------*/
   constant_coefficient = hypre_StructMatrixConstantCoefficient( from_matrix );
   hypre_assert( constant_coefficient ==
                 hypre_StructMatrixConstantCoefficient( to_matrix ) );

   mat_num_values = hypre_StructMatrixNumValues(from_matrix);
   /* BUG FIX: this assert previously used "=" (assignment), which always
      passed and silently overwrote mat_num_values; it must compare. */
   hypre_assert( mat_num_values == hypre_StructMatrixNumValues(to_matrix) );

   if ( constant_coefficient==0 )
   {
      comm_num_values = mat_num_values;
   }
   else if ( constant_coefficient==1 )
   {
      comm_num_values = 0;
   }
   else /* constant_coefficient==2 */
   {
      /* only the variable diagonal is communicated; it starts right after
         the stencil_size constant entries */
      comm_num_values = 1;
      stencil = hypre_StructMatrixStencil(from_matrix);
      stencil_size = hypre_StructStencilSize(stencil);
      hypre_assert(stencil_size ==
                   hypre_StructStencilSize( hypre_StructMatrixStencil(to_matrix) ) );
      data_initial_offset = stencil_size;
      matrix_data_comm_from = &( matrix_data_from[data_initial_offset] );
      matrix_data_comm_to = &( matrix_data_to[data_initial_offset] );
   }

   hypre_CreateCommInfoFromGrids(hypre_StructMatrixGrid(from_matrix),
                                 hypre_StructMatrixGrid(to_matrix),
                                 &comm_info);
   hypre_CommPkgCreate(comm_info,
                       hypre_StructMatrixDataSpace(from_matrix),
                       hypre_StructMatrixDataSpace(to_matrix),
                       comm_num_values, NULL, 0,
                       hypre_StructMatrixComm(from_matrix), &comm_pkg);
   hypre_CommInfoDestroy(comm_info);
   /* is this correct for periodic? */

   /*-----------------------------------------------------------------------
    * Migrate the matrix data
    *-----------------------------------------------------------------------*/
   if ( constant_coefficient!=1 )
   {
      hypre_InitializeCommunication( comm_pkg,
                                     matrix_data_comm_from,
                                     matrix_data_comm_to, 0, 0,
                                     &comm_handle );
      hypre_FinalizeCommunication( comm_handle );
   }
   hypre_CommPkgDestroy(comm_pkg);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructMatrixRead
*--------------------------------------------------------------------------*/
/* Read a matrix back from one file per MPI rank ("<filename>.<myid>"),
 * as written by hypre_StructMatrixPrint, and return the newly created,
 * initialized, and assembled matrix.  The caller owns the result. */
hypre_StructMatrix *
hypre_StructMatrixRead( MPI_Comm comm,
const char *filename,
HYPRE_Int *num_ghost )
{
FILE *file;
char new_filename[255];
hypre_StructMatrix *matrix;
hypre_StructGrid *grid;
hypre_BoxArray *boxes;
HYPRE_Int dim;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int stencil_size, real_stencil_size;
HYPRE_Int num_values;
hypre_BoxArray *data_space;
HYPRE_Int symmetric;
HYPRE_Int constant_coefficient;
HYPRE_Int i, idummy;
HYPRE_Int myid;
/*----------------------------------------
* Open file
*----------------------------------------*/
/* under pthreads, make sure we call the real MPI_Comm_rank, not the
per-thread wrapper the macro may have been redirected to */
#ifdef HYPRE_USE_PTHREADS
#if hypre_MPI_Comm_rank == hypre_thread_MPI_Comm_rank
#undef hypre_MPI_Comm_rank
#endif
#endif
hypre_MPI_Comm_rank(comm, &myid );
hypre_sprintf(new_filename, "%s.%05d", filename, myid);
if ((file = fopen(new_filename, "r")) == NULL)
{
hypre_printf("Error: can't open output file %s\n", new_filename);
exit(1);
}
/*----------------------------------------
* Read header info
*----------------------------------------*/
hypre_fscanf(file, "StructMatrix\n");
hypre_fscanf(file, "\nSymmetric: %d\n", &symmetric);
hypre_fscanf(file, "\nConstantCoefficient: %d\n", &constant_coefficient);
/* read grid info */
hypre_fscanf(file, "\nGrid:\n");
hypre_StructGridRead(comm,file,&grid);
/* read stencil info */
hypre_fscanf(file, "\nStencil:\n");
dim = hypre_StructGridDim(grid);
hypre_fscanf(file, "%d\n", &stencil_size);
/* a symmetric matrix was printed with only the stored half of the
stencil; the full stencil is twice that minus the shared center */
if (symmetric) { real_stencil_size = 2*stencil_size-1; }
else { real_stencil_size = stencil_size; }
/* ... real_stencil_size is the stencil size of the matrix after it's fixed up
by the call (if any) of hypre_StructStencilSymmetrize from
hypre_StructMatrixInitializeShell.*/
stencil_shape = hypre_CTAlloc(hypre_Index, stencil_size);
for (i = 0; i < stencil_size; i++)
{
hypre_fscanf(file, "%d: %d %d %d\n", &idummy,
&hypre_IndexX(stencil_shape[i]),
&hypre_IndexY(stencil_shape[i]),
&hypre_IndexZ(stencil_shape[i]));
}
stencil = hypre_StructStencilCreate(dim, stencil_size, stencil_shape);
/*----------------------------------------
* Initialize the matrix
*----------------------------------------*/
matrix = hypre_StructMatrixCreate(comm, grid, stencil);
hypre_StructMatrixSymmetric(matrix) = symmetric;
hypre_StructMatrixConstantCoefficient(matrix) = constant_coefficient;
hypre_StructMatrixSetNumGhost(matrix, num_ghost);
hypre_StructMatrixInitialize(matrix);
/*----------------------------------------
* Read data
*----------------------------------------*/
boxes = hypre_StructGridBoxes(grid);
data_space = hypre_StructMatrixDataSpace(matrix);
num_values = hypre_StructMatrixNumValues(matrix);
hypre_fscanf(file, "\nData:\n");
/* the data layout differs per constant_coefficient mode */
if ( constant_coefficient==0 )
{
hypre_ReadBoxArrayData(file, boxes, data_space, num_values,
hypre_StructMatrixData(matrix));
}
else
{
hypre_assert( constant_coefficient<=2 );
hypre_ReadBoxArrayData_CC( file, boxes, data_space,
stencil_size, real_stencil_size,
constant_coefficient,
hypre_StructMatrixData(matrix));
}
/*----------------------------------------
* Assemble the matrix
*----------------------------------------*/
hypre_StructMatrixAssemble(matrix);
/*----------------------------------------
* Close file
*----------------------------------------*/
fclose(file);
return matrix;
}
|
SuperArc.h | /// \ingroup base
//
/// \class ttk::ftm::SuperArc
/// \author Charles Gueunet <charles.gueunet@lip6.fr>
/// \date June 2016.
///
///\brief TTK class representing a SuperArc of a tree,
/// containing regular vertices.
///
///\param dataType Data type of the input scalar field (char, float,
/// etc.).
#ifndef SUPERARC_H
#define SUPERARC_H
#include <list>
#include <vector>
#include <Debug.h>
#include "FTMTree_DataTypes.h"
#include "Segmentation.h"
#include "Structures.h"
namespace ttk
{
namespace ftm
{
      // A SuperArc connects two nodes (down/up) of a merge tree and stores the
      // regular vertices crossed between them (its segmentation).
      class SuperArc
      {
        private:
         // Extrema: ids of the nodes at the lower and upper end of this arc
         idNode downNodeId_, upNodeId_;
         // State of this arc (visible, hidden, merged) if merged...
         ComponentState state_;
         // Keep the last vertex seen by this arc.
         // After the build of a merge tree, a close step is
         // done, using this field to close each root arc
         SimplexId lastVisited_;
         // Segmentation related
         ArcRegion region_;
         // number of vertices seen by this arc so far (see setLastVisited / atomicIncVisited)
         SimplexId verticesSeen_;
         // id of this arc when only the visible arcs are numbered consecutively
         idSuperArc normalizedId_;

        public:
         // -----------------
         // CONSTRUCT
         // -----------------

         // This arc will needs to receive both ends before being printed
         SuperArc()
            : downNodeId_(nullNodes),
              upNodeId_(nullNodes),
              state_(ComponentState::Visible),
              lastVisited_(nullVertex),
              region_(),
              verticesSeen_(0),
              normalizedId_(nullSuperArc)
         {
         }

         // Construct an arc with both ends already known.
         SuperArc(idNode d, idNode u, const ComponentState &state = ComponentState::Visible)
            : downNodeId_(d),
              upNodeId_(u),
              state_(state),
              lastVisited_(nullVertex),
              region_(),
              verticesSeen_(0),
              normalizedId_(nullSuperArc)
         {
         }

         // ------------------
         // ACCESSOR
         // --------------------

         // node

         inline idNode getUpNodeId(void) const
         {
            return upNodeId_;
         }

         inline idNode getDownNodeId(void) const
         {
            return downNodeId_;
         }

         inline void setUpNodeId(idNode upId)
         {
            upNodeId_ = upId;
         }

         inline void setDownNodeId(idNode downId)
         {
            downNodeId_ = downId;
         }

         // last vertex seen, nb vertex seen & ids

         inline SimplexId getLastVisited(void) const
         {
            return lastVisited_;
         }

         // Record the last vertex reached by this arc; also counts it as seen.
         inline void setLastVisited(SimplexId vertId)
         {
            lastVisited_ = vertId;
            ++verticesSeen_;
         }

         // Thread-safe increment of the seen-vertices counter (atomic when
         // OpenMP is enabled; plain increment otherwise).
         inline void atomicIncVisited(const SimplexId nb=1)
         {
#ifdef TTK_ENABLE_OPENMP
#pragma omp atomic update
#endif
            verticesSeen_ += nb;
         }

         // NOTE(review): not atomic, unlike atomicIncVisited — presumably only
         // called outside parallel sections; confirm at call sites.
         inline void decrNbSeen(void)
         {
            --verticesSeen_;
         }

         inline SimplexId getNbVertSeen(void) const
         {
            return verticesSeen_;
         }

         inline idSuperArc getNormalizedId(void) const
         {
            return normalizedId_;
         }

         inline void setNormalizeIds(const idSuperArc id)
         {
            normalizedId_ = id;
         }

         // state

         inline bool isHidden(void) const
         {
            return state_ == ComponentState::Hidden;
         }

         inline bool isMerged(void) const
         {
            return state_ == ComponentState::Merged;
         }

         inline bool isVisible(void) const
         {
            return state_ == ComponentState::Visible;
         }

         // ------------
         // Segmentation
         // ------------

         // Functions delegating to the underlying ArcRegion

         inline void concat(const segm_it &begin, const segm_it &end)
         {
            region_.concat(begin, end);
         }

         inline void concat(const ArcRegion &r)
         {
            region_.concat(r);
         }

         inline void concat(const SuperArc &s)
         {
            region_.concat(s.region_);
         }

         inline void concat(std::tuple<segm_it, segm_it> its)
         {
            region_.concat(std::get<0>(its), std::get<1>(its));
         }

         // prerequisite for the following segmentation functions
         inline void createSegmentation(const Scalars *s)
         {
            region_.createSegmentation(s);
         }

         // Direct read access to the list of region

         const std::list<Region> &getRegions(void) const
         {
            return region_.getRegions();
         }

         std::list<Region> &getRegions(void)
         {
            return region_.getRegions();
         }

         const ArcRegion &getRegion(void) const
         {
            return region_;
         }

         size_t regionSize(void) const
         {
            return region_.count();
         }

         void clearSegmentation(void)
         {
            region_.clear();
         }

         // access segmentation (after createSegmentation)
         // vector-like

         inline size_t size(void) const
         {
            return region_.size();
         }

         std::vector<SimplexId>::iterator begin(void)
         {
            return region_.begin();
         }

         std::vector<SimplexId>::iterator end(void)
         {
            return region_.end();
         }

         SimplexId operator[](SimplexId v) const
         {
            return region_[v];
         }

         SimplexId &operator[](SimplexId v)
         {
            return region_[v];
         }

         // Access Segmentation legacy

         SimplexId getNumberOfRegularNodes() const
         {
            return region_.size();
         }

         SimplexId getRegularNodeId(SimplexId id) const
         {
            return region_[id];
         }

         // process segmentation

         // keep the front segmentation, return the back
         std::tuple<SimplexId, ArcRegion> splitFront(SimplexId v, const Scalars *s)
         {
            return region_.splitFront(v, s);
         }

         // Keep the back, return the front
         std::tuple<SimplexId, ArcRegion> splitBack(SimplexId v, const Scalars *s)
         {
            return region_.splitBack(v, s);
         }

         SimplexId findBelow(SimplexId v, const Scalars *s,
                        const std::vector<idCorresp> &vert2treeOther = std::vector<idCorresp>()) const
         {
            return region_.findBelow(v, s, vert2treeOther);
         }

         bool merge(const SuperArc &s)
         {
            return region_.merge(s.region_);
         }

         std::string printReg(void) const
         {
            return region_.print();
         }
      };
}
}
#endif /* end of include guard: SUPERARC_H */
|
GeometryConverter.h | /* -*-c++-*- IfcQuery www.ifcquery.com
*
MIT License
Copyright (c) 2017 Fabian Gerold
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <unordered_set>
#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/BuildingModel.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcGloballyUniqueId.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h>
#include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include "IncludeCarveHeaders.h"
#include "GeometryInputData.h"
#include "RepresentationConverter.h"
#include "CSG_Adapter.h"
class GeometryConverter : public StatusCallback
{
protected:
shared_ptr<BuildingModel> m_ifc_model;
shared_ptr<GeometrySettings> m_geom_settings;
shared_ptr<RepresentationConverter> m_representation_converter;
std::map<std::string, shared_ptr<ProductShapeData> > m_product_shape_data;
std::map<std::string, shared_ptr<BuildingObject> > m_map_outside_spatial_structure;
double m_recent_progress = 0;
double m_csg_eps = 1.5e-05;
std::map<int, std::vector<shared_ptr<StatusCallback::Message> > > m_messages;
#ifdef ENABLE_OPENMP
Mutex m_writelock_messages;
#endif
public:
// getters and setters
shared_ptr<BuildingModel>& getBuildingModel() { return m_ifc_model; }
shared_ptr<RepresentationConverter>& getRepresentationConverter() { return m_representation_converter; }
shared_ptr<GeometrySettings>& getGeomSettings() { return m_geom_settings; }
std::map<std::string, shared_ptr<ProductShapeData> >& getShapeInputData() { return m_product_shape_data; }
std::map<std::string, shared_ptr<BuildingObject> >& getObjectsOutsideSpatialStructure() { return m_map_outside_spatial_structure; }
GeometryConverter( shared_ptr<BuildingModel>& ifc_model )
{
m_ifc_model = ifc_model;
m_geom_settings = shared_ptr<GeometrySettings>( new GeometrySettings() );
resetNumVerticesPerCircle();
shared_ptr<UnitConverter>& unit_converter = m_ifc_model->getUnitConverter();
m_representation_converter = shared_ptr<RepresentationConverter>( new RepresentationConverter( m_geom_settings, unit_converter ) );
// redirect all messages to this->messageTarget
m_ifc_model->setMessageTarget( this );
m_representation_converter->setMessageTarget( this );
}
virtual ~GeometryConverter() {}
void resetModel()
{
progressTextCallback( L"Unloading model, cleaning up memory..." );
clearInputCache();
m_recent_progress = 0.0;
m_ifc_model->clearCache();
m_ifc_model->clearIfcModel();
progressTextCallback( L"Unloading model done" );
progressValueCallback( 0.0, "parse" );
#ifdef _DEBUG
GeomDebugDump::clearMeshsetDump();
#endif
}
void clearInputCache()
{
m_product_shape_data.clear();
m_map_outside_spatial_structure.clear();
m_representation_converter->clearCache();
m_messages.clear();
}
void resetNumVerticesPerCircle()
{
m_geom_settings->resetNumVerticesPerCircle();
}
void setCsgEps(double eps)
{
m_csg_eps = eps;
}
void setModel( shared_ptr<BuildingModel> model )
{
if( m_ifc_model )
{
m_ifc_model->unsetMessageCallBack();
}
clearInputCache();
m_ifc_model = model;
m_representation_converter->clearCache();
m_representation_converter->setUnitConverter( m_ifc_model->getUnitConverter() );
m_ifc_model->setMessageTarget( this );
}
void resolveProjectStructure( shared_ptr<ProductShapeData>& product_data )
{
if( !product_data )
{
return;
}
if( product_data->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(product_data->m_ifc_object_definition);
shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
if (!ifc_product)
{
return;
}
product_data->m_added_to_spatial_structure = true;
const std::vector<weak_ptr<IfcRelAggregates> >& vec_IsDecomposedBy = ifc_product->m_IsDecomposedBy_inverse;
for( size_t ii = 0; ii < vec_IsDecomposedBy.size(); ++ii )
{
const weak_ptr<IfcRelAggregates>& rel_aggregates_weak_ptr = vec_IsDecomposedBy[ii];
if( rel_aggregates_weak_ptr.expired() )
{
continue;
}
shared_ptr<IfcRelAggregates> rel_aggregates( rel_aggregates_weak_ptr );
if( rel_aggregates )
{
const std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = rel_aggregates->m_RelatedObjects;
for( size_t jj = 0; jj < vec_related_objects.size(); ++jj )
{
const shared_ptr<IfcObjectDefinition>& related_obj_def = vec_related_objects[jj];
if( related_obj_def )
{
std::string related_guid;
if (related_obj_def->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
related_guid = converterX.to_bytes(related_obj_def->m_GlobalId->m_value);
}
auto it_product_map = m_product_shape_data.find(related_guid);
if( it_product_map != m_product_shape_data.end() )
{
shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
if( related_product_shape )
{
product_data->addChildProduct( related_product_shape, product_data );
resolveProjectStructure( related_product_shape );
}
}
}
}
}
}
shared_ptr<IfcSpatialStructureElement> spatial_ele = dynamic_pointer_cast<IfcSpatialStructureElement>(ifc_product);
if( spatial_ele )
{
const std::vector<weak_ptr<IfcRelContainedInSpatialStructure> >& vec_contains = spatial_ele->m_ContainsElements_inverse;
for( size_t ii = 0; ii < vec_contains.size(); ++ii )
{
const weak_ptr<IfcRelContainedInSpatialStructure>& rel_contained_weak_ptr = vec_contains[ii];
if( rel_contained_weak_ptr.expired() )
{
continue;
}
shared_ptr<IfcRelContainedInSpatialStructure> rel_contained( rel_contained_weak_ptr );
if( rel_contained )
{
const std::vector<shared_ptr<IfcProduct> >& vec_related_elements = rel_contained->m_RelatedElements;
for( size_t jj = 0; jj < vec_related_elements.size(); ++jj )
{
const shared_ptr<IfcProduct>& related_product = vec_related_elements[jj];
if( related_product )
{
std::string related_guid;
if (related_product->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
related_guid = converterX.to_bytes(related_product->m_GlobalId->m_value);
}
auto it_product_map = m_product_shape_data.find(related_guid);
if( it_product_map != m_product_shape_data.end() )
{
shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
if( related_product_shape )
{
product_data->addChildProduct( related_product_shape, product_data );
resolveProjectStructure( related_product_shape );
}
}
}
}
}
}
}
// TODO: handle IfcRelAssignsToProduct
}
void readAppearanceFromPropertySet( const shared_ptr<IfcPropertySet>& prop_set, shared_ptr<ProductShapeData>& product_shape )
{
if( !prop_set )
{
return;
}
for( auto& ifc_property : prop_set->m_HasProperties )
{
if( !ifc_property )
{
continue;
}
shared_ptr<IfcSimpleProperty> simple_property = dynamic_pointer_cast<IfcSimpleProperty>(ifc_property);
if( simple_property )
{
// ENTITY IfcSimpleProperty ABSTRACT SUPERTYPE OF(ONEOF( IfcPropertyBoundedValue, IfcPropertyEnumeratedValue, IfcPropertyListValue,
// IfcPropertyReferenceValue, IfcPropertySingleValue, IfcPropertyTableValue))
shared_ptr<IfcIdentifier> property_name = simple_property->m_Name;
std::wstring name_str = property_name->m_value;
if( name_str.compare( L"LayerName" ) == 0 )
{
// TODO: implement layers
}
shared_ptr<IfcText> description = simple_property->m_Description;
shared_ptr<IfcPropertySingleValue> property_single_value = dynamic_pointer_cast<IfcPropertySingleValue>(simple_property);
if( property_single_value )
{
//shared_ptr<IfcValue>& nominal_value = property_single_value->m_NominalValue; //optional
//shared_ptr<IfcUnit>& unit = property_single_value->m_Unit; //optional
}
continue;
}
shared_ptr<IfcComplexProperty> complex_property = dynamic_pointer_cast<IfcComplexProperty>(ifc_property);
if( complex_property )
{
if( !complex_property->m_UsageName ) continue;
if( complex_property->m_UsageName->m_value.compare( L"Color" ) == 0 )
{
vec4 vec_color;
m_representation_converter->getStylesConverter()->convertIfcComplexPropertyColor( complex_property, vec_color );
shared_ptr<AppearanceData> appearance_data( new AppearanceData( -1 ) );
if( !appearance_data )
{
throw OutOfMemoryException( __FUNC__ );
}
appearance_data->m_apply_to_geometry_type = AppearanceData::GEOM_TYPE_ANY;
appearance_data->m_color_ambient.setColor( vec_color );
appearance_data->m_color_diffuse.setColor( vec_color );
appearance_data->m_color_specular.setColor( vec_color );
appearance_data->m_shininess = 35.f;
product_shape->addAppearance( appearance_data );
}
}
}
}
/*\brief method convertGeometry: Creates geometry for Carve from previously loaded BuildingModel model.
**/
void convertGeometry()
{
progressTextCallback( L"Creating geometry..." );
progressValueCallback( 0, "geometry" );
m_product_shape_data.clear();
m_map_outside_spatial_structure.clear();
m_representation_converter->clearCache();
if( !m_ifc_model )
{
return;
}
shared_ptr<ProductShapeData> ifc_project_data;
std::vector<shared_ptr<IfcObjectDefinition> > vec_object_definitions;
double length_to_meter_factor = 1.0;
if( m_ifc_model->getUnitConverter() )
{
length_to_meter_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
}
carve::setEpsilon( m_csg_eps );
const std::map<int, shared_ptr<BuildingEntity> >& map_entities = m_ifc_model->getMapIfcEntities();
for( auto it = map_entities.begin(); it != map_entities.end(); ++it )
{
shared_ptr<BuildingEntity> obj = it->second;
shared_ptr<IfcObjectDefinition> product = dynamic_pointer_cast<IfcObjectDefinition>(obj);
if(product)
{
vec_object_definitions.push_back(product);
}
}
// create geometry for for each IfcProduct independently, spatial structure will be resolved later
std::map<std::string, shared_ptr<ProductShapeData> >* map_products_ptr = &m_product_shape_data;
const int num_object_definitions = (int)vec_object_definitions.size();
#ifdef ENABLE_OPENMP
Mutex writelock_map;
Mutex writelock_ifc_project;
#pragma omp parallel firstprivate(num_object_definitions) shared(map_products_ptr)
{
// time for one product may vary significantly, so schedule not so many
#pragma omp for schedule(dynamic,40)
#endif
for( int i = 0; i < num_object_definitions; ++i )
{
shared_ptr<IfcObjectDefinition> ifc_product = vec_object_definitions[i];
const int entity_id = ifc_product->m_entity_id;
std::string guid;
if (ifc_product->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
guid = converterX.to_bytes(ifc_product->m_GlobalId->m_value);
}
shared_ptr<ProductShapeData> product_geom_input_data( new ProductShapeData( entity_id ) );
product_geom_input_data->m_ifc_object_definition = ifc_product;
std::stringstream thread_err;
if( !m_geom_settings->getRenderObjectFilter()(ifc_product) )
{
// geometry will be created in method subtractOpenings
continue;
}
else if( dynamic_pointer_cast<IfcProject>(ifc_product) )
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_ifc_project );
#endif
ifc_project_data = product_geom_input_data;
}
try
{
convertIfcProductShape( product_geom_input_data );
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
thread_err << e.what();
}
catch( carve::exception& e )
{
thread_err << e.str();
}
catch( std::exception& e )
{
thread_err << e.what();
}
catch( ... )
{
thread_err << "undefined error, product id " << entity_id;
}
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_map );
#endif
map_products_ptr->insert( std::make_pair( guid, product_geom_input_data ) );
if( thread_err.tellp() > 0 )
{
messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
}
// progress callback
double progress = (double)i / (double)num_object_definitions;
if( progress - m_recent_progress > 0.02 )
{
#ifdef ENABLE_OPENMP
if( omp_get_thread_num() == 0 )
#endif
{
// leave 10% of progress to openscenegraph internals
progressValueCallback( progress*0.9, "geometry" );
m_recent_progress = progress;
}
}
}
#ifdef ENABLE_OPENMP
} // implicit barrier
#endif
// subtract openings in related objects, such as IFCBUILDINGELEMENTPART connected to a window through IFCRELAGGREGATES
for( auto it = map_products_ptr->begin(); it != map_products_ptr->end(); ++it )
{
shared_ptr<ProductShapeData> product_geom_input_data = it->second;
try
{
subtractOpeningsInRelatedObjects(product_geom_input_data);
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
}
catch( carve::exception& e )
{
messageCallback(e.str(), StatusCallback::MESSAGE_TYPE_ERROR, "");
}
catch( std::exception& e )
{
messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
}
catch( ... )
{
messageCallback("undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__);
}
}
try
{
// now resolve spatial structure
if( ifc_project_data )
{
resolveProjectStructure( ifc_project_data );
}
// check if there are entities that are not in spatial structure
for( auto it_product_shapes = m_product_shape_data.begin(); it_product_shapes != m_product_shape_data.end(); ++it_product_shapes )
{
shared_ptr<ProductShapeData> product_shape = it_product_shapes->second;
if( !product_shape )
{
continue;
}
if( !product_shape->m_added_to_spatial_structure )
{
if( !product_shape->m_ifc_object_definition.expired() )
{
shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition );
shared_ptr<IfcFeatureElementSubtraction> opening = dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def);
if( !m_geom_settings->getRenderObjectFilter()(ifc_object_def) )
{
continue;
}
std::string guid;
if (ifc_object_def->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
guid = converterX.to_bytes(ifc_object_def->m_GlobalId->m_value);
}
m_map_outside_spatial_structure[guid] = ifc_object_def;
}
}
}
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( std::exception& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( ... )
{
messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
m_representation_converter->getProfileCache()->clearProfileCache();
progressTextCallback( L"Loading file done" );
progressValueCallback( 1.0, "geometry" );
}
//\brief method convertIfcProduct: Creates geometry objects (meshset with connected vertex-edge-face graph) from an IfcProduct object
// caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock
void convertIfcProductShape( shared_ptr<ProductShapeData>& product_shape )
{
if( product_shape->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
if (!ifc_product)
{
return;
}
if( !ifc_product->m_Representation )
{
return;
}
double length_factor = 1.0;
if( m_ifc_model )
{
if( m_ifc_model->getUnitConverter() )
{
length_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
}
}
// evaluate IFC geometry
shared_ptr<IfcProductRepresentation>& product_representation = ifc_product->m_Representation;
std::vector<shared_ptr<IfcRepresentation> >& vec_representations = product_representation->m_Representations;
for( size_t i_representations = 0; i_representations < vec_representations.size(); ++i_representations )
{
const shared_ptr<IfcRepresentation>& representation = vec_representations[i_representations];
if( !representation )
{
continue;
}
try
{
shared_ptr<RepresentationData> representation_data( new RepresentationData() );
m_representation_converter->convertIfcRepresentation( representation, representation_data );
product_shape->m_vec_representations.push_back( representation_data );
representation_data->m_parent_product = product_shape;
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( std::exception& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
}
// IfcProduct has an ObjectPlacement that can be local or global
product_shape->m_object_placement = ifc_product->m_ObjectPlacement;
if( ifc_product->m_ObjectPlacement )
{
// IfcPlacement2Matrix follows related placements in case of local coordinate systems
std::unordered_set<IfcObjectPlacement*> placement_already_applied;
m_representation_converter->getPlacementConverter()->convertIfcObjectPlacement( ifc_product->m_ObjectPlacement, product_shape, placement_already_applied, false );
}
// handle openings
std::vector<shared_ptr<ProductShapeData> > vec_opening_data;
const shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
if( ifc_element )
{
m_representation_converter->subtractOpenings(ifc_element, product_shape);
}
// Fetch the IFCProduct relationships
if( ifc_product->m_IsDefinedBy_inverse.size() > 0 )
{
std::vector<weak_ptr<IfcRelDefinesByProperties> >& vec_IsDefinedBy_inverse = ifc_product->m_IsDefinedBy_inverse;
for( size_t i = 0; i < vec_IsDefinedBy_inverse.size(); ++i )
{
shared_ptr<IfcRelDefinesByProperties> rel_def( vec_IsDefinedBy_inverse[i] );
shared_ptr<IfcPropertySetDefinitionSelect> relating_property_definition_select = rel_def->m_RelatingPropertyDefinition;
if( relating_property_definition_select )
{
// TYPE IfcPropertySetDefinitionSelect = SELECT (IfcPropertySetDefinition ,IfcPropertySetDefinitionSet);
shared_ptr<IfcPropertySetDefinition> property_set_def = dynamic_pointer_cast<IfcPropertySetDefinition>(relating_property_definition_select);
if( property_set_def )
{
shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def);
if( property_set )
{
readAppearanceFromPropertySet( property_set, product_shape );
}
continue;
}
shared_ptr<IfcPropertySetDefinitionSet> property_set_def_set = dynamic_pointer_cast<IfcPropertySetDefinitionSet>(relating_property_definition_select);
if( property_set_def_set )
{
std::vector<shared_ptr<IfcPropertySetDefinition> >& vec_propterty_set_def = property_set_def_set->m_vec;
std::vector<shared_ptr<IfcPropertySetDefinition> >::iterator it_property_set_def;
for( it_property_set_def = vec_propterty_set_def.begin(); it_property_set_def != vec_propterty_set_def.end(); ++it_property_set_def )
{
shared_ptr<IfcPropertySetDefinition> property_set_def2 = (*it_property_set_def);
if( property_set_def2 )
{
shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def2);
if( property_set )
{
readAppearanceFromPropertySet( property_set, product_shape );
}
}
}
continue;
}
}
}
}
}
void subtractOpeningsInRelatedObjects(shared_ptr<ProductShapeData>& product_shape)
{
if( product_shape->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
if (!ifc_product)
{
return;
}
shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
if( !ifc_element )
{
return;
}
if( ifc_element->m_HasOpenings_inverse.size() == 0 )
{
return;
}
// collect aggregated objects
const std::vector<weak_ptr<IfcRelAggregates> >& vec_decomposed_by = ifc_element->m_IsDecomposedBy_inverse;
for( auto& decomposed_by : vec_decomposed_by )
{
if( decomposed_by.expired() )
{
continue;
}
shared_ptr<IfcRelAggregates> decomposed_by_aggregates(decomposed_by);
std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = decomposed_by_aggregates->m_RelatedObjects;
for( auto& related_object : vec_related_objects )
{
if( !related_object )
{
continue;
}
std::string guid;
if (related_object->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
guid = converterX.to_bytes(related_object->m_GlobalId->m_value);
auto it_find_related_shape = m_product_shape_data.find(guid);
if( it_find_related_shape != m_product_shape_data.end() )
{
shared_ptr<ProductShapeData>& related_product_shape = it_find_related_shape->second;
m_representation_converter->subtractOpenings(ifc_element, related_product_shape);
}
}
}
}
}
virtual void messageTarget( void* ptr, shared_ptr<StatusCallback::Message> m )
{
GeometryConverter* myself = (GeometryConverter*)ptr;
if( myself )
{
if( m->m_entity )
{
#ifdef ENABLE_OPENMP
ScopedLock lock( myself->m_writelock_messages );
#endif
// make sure that the same message for one entity does not appear several times
const int entity_id = m->m_entity->m_entity_id;
auto it = myself->m_messages.find( entity_id );
if( it != myself->m_messages.end() )
{
std::vector<shared_ptr<StatusCallback::Message> >& vec_message_for_entity = it->second;
for( size_t i = 0; i < vec_message_for_entity.size(); ++i )
{
shared_ptr<StatusCallback::Message>& existing_message = vec_message_for_entity[i];
if( existing_message->m_message_text.compare( m->m_message_text ) == 0 )
{
// same message for same entity is already there, so ignore message
return;
}
}
vec_message_for_entity.push_back( m );
}
else
{
std::vector<shared_ptr<StatusCallback::Message> >& vec = myself->m_messages.insert( std::make_pair( entity_id, std::vector<shared_ptr<StatusCallback::Message> >() ) ).first->second;
vec.push_back( m );
}
}
myself->messageCallback( m );
}
}
};
|
GB_unop__identity_int16_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_bool)
// op(A') function: GB (_unop_tran__identity_int16_bool)
// C type: int16_t
// A type: bool
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary operator with typecast: Cx [p] = (int16_t) Ax [p].
// Handles both the dense case (Ab == NULL) and the bitmap case, where entries
// with Ab [p] == 0 are skipped.  Cx and Ax may be aliased.
GrB_Info GB (_unop_apply__identity_int16_bool)
(
    int16_t *Cx,        // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared outside the loops for portability with older OpenMP
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: typecast every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // Cx [p] = (int16_t) Ax [p], via the generated cast macro
            GB_CAST_OP (p, p) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GB_CAST_OP (p, p) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity operator.
// The whole kernel body is generated by the shared template in
// GB_unop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int16_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces for the transpose
    const int64_t *restrict A_slice,    // how A's vectors are split across threads
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
owl_ndarray_conv_impl.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2019 Liang Wang <liang.wang@cl.cam.ac.uk>
*/
#ifndef OWL_CORE_CONV_IMPL
#define OWL_CORE_CONV_IMPL
/*
* Calculate the block sizes for convolution operations.
* Code heavily inspired by Eigen (http://eigen.tuxfamily.org/).
*/
#define IM2COL_THRESHOLD 512 * 1024
#define ALIGN_SIZE 32 // for AVX address alignment
// The effect of calculating block size according to cache sizes is yet to be
// proved here since we use OpenBLAS GEMM directly; also, note that we
// calculate `InputMatrix x KernelMatrix`, not the other way around.
/*
 * Choose cache-aware blocking factors for the `InputMatrix x KernelMatrix`
 * GEMM used by the convolution kernels.  On entry *kp / *mp / *np hold the
 * full K / M / N dimensions; on exit they hold the selected block sizes.
 * `typesize` is the size of one element in bytes.
 */
void compute_block_sizes(int* kp, int* mp, int* np, int typesize) {
  int l1, l2, l3;
  query_cache_sizes(&l1, &l2, &l3);

  int k = *kp;
  int m = *mp;
  int n = *np;

  /* blocking only pays off for reasonably large problems */
  if (fmaxf(k, fmaxf(m, n)) < 50) {
    return;
  }

  const int n_regs  = 4;                                  /* register columns */
  const int num_reg = 16;
  const int m_regs  = num_reg / (2 * n_regs) * typesize;  /* register rows */
  const int k_peel  = 8;                                  /* K granularity */
  const int kc_div  = (m_regs + n_regs) * typesize;
  const int kc_sub  = m_regs * n_regs * typesize;

  /* largest K block (multiple of k_peel) whose panel stays resident in L1 */
  const int kc_max = fmaxf(((l1 - kc_sub) / kc_div) & (~(k_peel - 1)), 1);
  const int k_before = k;
  if (k > kc_max) {
    k = (k % kc_max) == 0
      ? kc_max
      : kc_max - k_peel * ((kc_max - 1 - (k % kc_max)) / (k_peel * (k / kc_max + 1)));
    /* assert (k_before / k == k_before / kc_max); */
  }

  const int l2_budget = 1572864;  /* l3 for debug; otherwise 1572864 */
  const int lhs_bytes = m * k * typesize;
  const int l1_rest   = l1 - kc_sub - lhs_bytes;

  int nc_max;
  if (l1_rest >= n_regs * k * typesize) {
    nc_max = l1_rest / (k * typesize);
  } else {
    nc_max = (3 * l2_budget) / (4 * kc_max * typesize);
  }

  /* N block, rounded down to a multiple of the register-column count */
  int nc = (int) (fminf(l2_budget / (2 * k * typesize), nc_max)) & (~(n_regs - 1));
  if (n > nc) {
    n = (n % nc == 0) ? nc : (nc - n_regs * ((nc - (n % nc)) / (n_regs * (n / nc + 1))));
  } else if (k_before == k) {
    /* K was not reduced: see whether M can be blocked for a smaller cache */
    const int kn_bytes = k * n * typesize;
    int lm_budget = l2_budget;
    int mc_max = m;
    if (kn_bytes < 1024) {
      lm_budget = l1;
    } else if (l3 != 0 && kn_bytes <= 32768) {
      lm_budget = l2;
      mc_max = fminf(576, mc_max);
    }
    int mc = fminf(lm_budget / (3 * k * typesize), mc_max);
    if (mc > m_regs) {
      mc -= mc % m_regs;
    }
    else if (mc == 0) {
      *kp = k; *mp = m; *np = n;
      return;
    }
    m = (m % mc == 0) ? mc : (mc - m_regs * ((mc - (m % mc)) / (m_regs * (m / mc + 1))));
  }

  *kp = k; *mp = m; *np = n;
  return;
}
#endif /* OWL_CORE_CONV_IMPL */
#ifdef OWL_ENABLE_TEMPLATE
#ifdef AVX_PSIZE
/*
 * Fill in a temporary input matrix from the input tensor with vectorisation.
 * Currently only the AVX instruction set is supported.
 */
/*
 * Copy one kc-strip of a row of the implicit im2col matrix between the input
 * tensor and the scratch buffer, AVX_PSIZE elements at a time.
 * reverse_mode == 0: gather input_ptr -> output_ptr (forward direction);
 * reverse_mode != 0: read output_ptr and accumulate into input_ptr
 * (backward-input direction).  Out-of-range taps (zero padding) are skipped,
 * so the caller must pre-zero output_ptr for the forward direction.
 * *cmk_ptr indexes output_ptr and is advanced by kc_strip in total.
 * Precondition: in_channel % AVX_PSIZE == 0, so each AVX_PSIZE-wide run lies
 * within a single (column, row) tap and is contiguous in the input tensor.
 */
void ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
  TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int k,
  int kernel_ri, int input_ri, int in_channel, int idx_base, int cstart,
  int rstart, int input_cols, int input_rows, short reverse_mode
) {
  // assume output_ptr is aligned; if in_channel % AVX_PSIZE == 0, the input
  // matrix can always be loaded consecutively by a step of AVX_PSIZE
  for (int ik = 0; ik < kc_strip; ik += AVX_PSIZE) {
    // decompose the flat kernel index (k + ik) into (column, row, channel)
    int kc = (k + ik) / kernel_ri;
    int kri = (k + ik) - kc * kernel_ri;
    int kr = kri / in_channel;
    int ki = kri - kr * in_channel;
    int input_col = kc + cstart;
    int input_row = kr + rstart;
    if (input_col < input_cols && input_col >= 0 &&
        input_row < input_rows && input_row >= 0) {
      int input_index = idx_base + input_col * input_ri
        + input_row * in_channel + ki;
      if (reverse_mode == 0) {
        // gather: unaligned load from the tensor, aligned store to scratch
        AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
        AVX_STOREA(output_ptr + (*cmk_ptr), v);
      }
      else {
        // scatter-accumulate: input += scratch
        AVX_TYPE v1 = AVX_LOADA(output_ptr + (*cmk_ptr));
        AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
        AVX_TYPE v = AVX_ADD(v1, v2);
        AVX_STOREU(input_ptr + input_index, v);
      }
    }
    *cmk_ptr += AVX_PSIZE;
  }
  return;
}
/*
 * General variant of the im2col sub-matrix copy, used when
 * in_channel % AVX_PSIZE != 0.  Copies actual_kc elements of one im2col row:
 * the first kc_strip elements in AVX_PSIZE-wide chunks (vectorised when the
 * chunk is provably contiguous and fully in range, otherwise element by
 * element), then the remaining actual_kc - kc_strip elements one by one.
 * reverse_mode == 0 gathers input -> output scratch; otherwise accumulates
 * scratch -> input.  *cmk_ptr is advanced by actual_kc in total; skipped
 * (out-of-range, i.e. padded) taps leave the scratch untouched, so the
 * caller must pre-zero output_ptr for the forward direction.
 */
void ACX_FUN_LOAD (load_sub_matrix, spatial) (
  TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int actual_kc,
  int k, int kernel_ri, int input_ri, int in_channel, int idx_base,
  int cstart, int rstart, int input_cols, int input_rows,
  int kernel_rows, short reverse_mode
){
  int ik = 0;
  // first, load `kc_strip` numbers with a step of AVX_PSIZE;
  // assume `kc_strip % AVX_PSIZE == 0`
  for ( ; ik < kc_strip; ik += AVX_PSIZE) {
    // cr_set: combined (column * kernel_rows + row) kernel index of the first
    // and last element of this AVX_PSIZE chunk (channel is the fastest axis)
    const int cr_set[2] = {(k + ik) / in_channel,
      (k + ik + AVX_PSIZE - 1) / in_channel};
    const int c_set[2] = {cr_set[0] / kernel_rows,
      cr_set[1] / kernel_rows};
    const int cols[2] = {cstart + c_set[0], cstart + c_set[1]};
    // out of bounds; set the next AVX_PSIZE numbers to 0
    if (cols[0] >= input_cols || cols[1] < 0) {
      *cmk_ptr += AVX_PSIZE;
      continue;
    }
    else if (cols[0] == cols[1]) {
      // whole chunk lies in a single input column
      const int r_set[2] = {cr_set[0] - c_set[0] * kernel_rows,
        cr_set[1] - c_set[1] * kernel_rows};
      const int rows[2] = {rstart + r_set[0], rstart + r_set[1]};
      // out of bounds; set the next AVX_PSIZE numbers to 0
      if (rows[0] >= input_rows || rows[1] < 0) {
        *cmk_ptr += AVX_PSIZE;
        continue;
      }
      // next AVX_PSIZE numbers can be loaded consecutively
      else if (rows[0] >= 0 && rows[1] < input_rows) {
        int ki = k + ik - cr_set[0] * in_channel;
        int input_index = idx_base + cols[0] * input_ri
          + rows[0] * in_channel + ki;
        if (reverse_mode == 0) {
          AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
          AVX_STOREU(output_ptr + (*cmk_ptr), v);
        }
        else {
          // scatter-accumulate: input += scratch
          AVX_TYPE v1 = AVX_LOADU(output_ptr + (*cmk_ptr));
          AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
          AVX_TYPE v = AVX_ADD(v1, v2);
          AVX_STOREU(input_ptr + input_index, v);
        }
        *cmk_ptr += AVX_PSIZE;
        continue;
      }
    }
    // previous special cases do not apply; calculate input index one by one
    for (int ip = 0; ip < AVX_PSIZE; ip++) {
      int kc = (k + ik + ip) / kernel_ri;
      int kri = (k + ik + ip) - kc * kernel_ri;
      int kr = kri / in_channel;
      int ki = kri - kr * in_channel;
      int input_col = kc + cstart;
      int input_row = kr + rstart;
      if (input_col < input_cols && input_col >= 0 &&
          input_row < input_rows && input_row >= 0) {
        int input_index = idx_base + input_col * input_ri
          + input_row * in_channel + ki;
        if (reverse_mode == 0)
          output_ptr[*cmk_ptr] = input_ptr[input_index];
        else
          input_ptr[input_index] += output_ptr[*cmk_ptr];
      }
      *cmk_ptr += 1;
    }
  }
  // second, load the rest `actual_kc - kc_strip` numbers
  for (; ik < actual_kc; ik++) {
    int kc = (k + ik) / kernel_ri;
    int kri = (k + ik) - kc * kernel_ri;
    int kr = kri / in_channel;
    int ki = kri - kr * in_channel;
    int input_col = kc + cstart;
    int input_row = kr + rstart;
    if (input_col < input_cols && input_col >= 0 &&
        input_row < input_rows && input_row >= 0) {
      int input_index = idx_base + input_col * input_ri
        + input_row * in_channel + ki;
      if (reverse_mode == 0)
        output_ptr[*cmk_ptr] = input_ptr[input_index];
      else
        input_ptr[input_index] += output_ptr[*cmk_ptr];
    }
    *cmk_ptr += 1;
  }
  return;
}
#endif /* AVX_PSIZE */
/*
* GEBP-based implementation. See Goto et.al [08] for detail.
*/
/*
 * Forward 2-D spatial convolution over an NHWC tensor (channel fastest),
 * computed as a cache-blocked GEMM between the implicit im2col input matrix
 * (output_crb x kernel_cri) and the kernel matrix (kernel_cri x out_channel).
 * Small problems fall back to an explicit im2col buffer plus one GEMM call.
 * Scalar arguments arrive as OCaml integers (Long_val); the three bigarrays
 * hold the input tensor, the kernel, and the output (which is overwritten).
 * Exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);  // not used below
  int col_in_stride = Long_val(vCol_in_stride);  // not used below
  // element strides of the flattened NHWC layouts
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_cr = kernel_cols * kernel_rows;
  const int kernel_ri = kernel_rows * in_channel;
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  // padding == 1 means no padding; otherwise compute centred padding offsets
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  // if generated input matrix is small enough, use im2col implementation;
  // the division check guards against int overflow of the product
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    for (int i = 0; i < output_crb; ++i) {
      // decompose the im2col row index i into (batch, out-col, out-row)
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            // out-of-range taps stay zero (calloc) == zero padding
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
    GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
      output_crb, out_channel, kernel_cri, ALPHA,
      inpt2d, kernel_cri, kernel_ptr, out_channel,
      BETA, output_ptr, out_channel);
    free(inpt2d);
    return Val_unit;
  }
  // blocked path: pick cache-friendly block sizes mc x kc x nc
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  // NOTE(review): compute_block_sizes takes (kp, mp, np); the backward
  // functions below call it as (&mc, &kc, &nc) while this call passes
  // (&kc, &nc, &mc) — confirm which argument order is intended.
  compute_block_sizes(&kc, &nc, &mc, sizeof(TYPE));
#ifdef AVX_PSIZE
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);
  for (int m = 0; m < output_crb; m += mc) {
    int actual_mc = fminf(m + mc, output_crb) - m;
    for (int k = 0; k < kernel_cri; k += kc) {
      // pre-zero so padded (skipped) taps read as zeros
      memset(temp_mk, 0, mc * kc * sizeof(TYPE));
      int actual_kc = fminf(k + kc, kernel_cri) - k;
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      // iterate along each row of the generated input matrix; processing four
      // rows in parallel with the help of e.g. OpenMP should be possible
      int cmk = 0;
      for (int im = 0; im < actual_mc; im += 1) {
        int b = (m + im) / output_cr;
        int cr = (m + im) - b * output_cr;
        int c = cr / output_rows;
        int r = cr - c * output_rows;
        const int cstart = c * col_stride - pc;
        const int rstart = r * row_stride - pr;
        const int idx_base = b * input_cri;
        // fill in the sub input matrix
#ifdef AVX_PSIZE
        if (fast_flag) {
          ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
            in_channel, idx_base, cstart, rstart, input_cols, input_rows, 0);
        }
        else {
          ACX_FUN_LOAD (load_sub_matrix, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
            k, kernel_ri, input_ri, in_channel, idx_base,
            cstart, rstart, input_cols, input_rows, kernel_rows, 0);
        }
#else
        // scalar fallback: one im2col element at a time
        for (int ik = 0; ik < actual_kc; ik += 1) {
          int kc = (k + ik) / kernel_ri;
          int kri = (k + ik) - kc * kernel_ri;
          int kr = kri / in_channel;
          int ki = kri - kr * in_channel;
          int input_col = kc + cstart;
          int input_row = kr + rstart;
          if (input_col < input_cols && input_col >= 0 &&
              input_row < input_rows && input_row >= 0) {
            int input_index = idx_base + input_col * input_ri
              + input_row * in_channel + ki;
            temp_mk[cmk] = input_ptr[input_index];
          }
          cmk++;
        }
#endif
      }
      int idx_kn_base = k * out_channel;
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        // NOTE(review): `+=` accumulates n across iterations (0, nc, 3*nc,
        // ...); when out_channel spans more than two nc blocks this differs
        // from k * out_channel + n — confirm this is intended.
        idx_kn_base += n;
        // fill in the kernel matrix
        int cnk = 0;
        for (int ik = 0; ik < actual_kc; ik++) {
          for (int jn = 0; jn < actual_nc; jn++) {
            int index_kn = idx_kn_base + ik * out_channel + jn;
            temp_kn[cnk++] = kernel_ptr[index_kn];
          }
        }
        GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
          actual_mc, actual_nc, actual_kc, ALPHA,
          temp_mk, actual_kc, temp_kn, actual_nc,
          BETA, temp_mn, actual_nc);
        // accumulate the block product into the final output
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = (ix + m) * out_channel + (iy + n);
            output_ptr[index_mn] += temp_mn[cmn++];
          }
        }
      }
    }
  }
  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode stub for the 17-argument native spatial convolution: OCaml's
 * bytecode runtime passes the arguments boxed in an array. */
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) {
  return FUN_NATIVE (spatial) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16]
  );
}
/*
 * Gradient of the spatial convolution with respect to the input: scatters
 * output_ptr (the incoming gradient) times the kernel back into input_ptr,
 * which is zeroed first and used as the destination.  Small problems use a
 * single GEMM into an explicit col2im buffer; larger ones use the blocked
 * path with the reverse (accumulate) mode of the sub-matrix loaders.
 * Exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);  // not used below
  int col_in_stride = Long_val(vCol_in_stride);  // not used below
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_ri = kernel_rows * in_channel;
  // centred padding offsets (backward always assumes SAME-style padding calc)
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  // small problems: one GEMM into a col2im buffer, then scatter-accumulate;
  // the division check guards against int overflow of the product
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    // inpt2d = grad_output (output_crb x out_channel) * kernel^T
    GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
      output_crb, kernel_cri, out_channel, ALPHA,
      output_ptr, out_channel, kernel_ptr, out_channel,
      BETA, inpt2d, kernel_cri);
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              // overlapping windows accumulate into the same input element
              input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
            }
            ++cnt;
          }
        }
      }
    }
    free(inpt2d);
    return Val_unit;
  }
  // blocked path
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE));
#ifdef AVX_PSIZE
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);
  for (int m = 0; m < output_crb; m += mc) {
    int actual_mc = fminf(m + mc, output_crb) - m;
    int idx_mn_base = m * out_channel;
    for (int k = 0; k < kernel_cri; k += kc) {
      int actual_kc = fminf(k + kc, kernel_cri) - k;
      int idx_kn_base = k * out_channel;
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        // NOTE(review): both bases are bumped with `+=`, so they accumulate
        // across n iterations (and idx_mn_base also across k iterations);
        // for more than one n or k block this differs from
        // base + n — confirm this is intended.
        idx_kn_base += n;
        idx_mn_base += n;
        // pack the kernel block
        int cnk = 0;
        for (int ik = 0; ik < actual_kc; ik++) {
          for (int jn = 0; jn < actual_nc; jn++) {
            int index_kn = idx_kn_base + ik * out_channel + jn;
            temp_kn[cnk++] = kernel_ptr[index_kn];
          }
        }
        // pack the gradient-output block
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = idx_mn_base + ix * out_channel + iy;
            temp_mn[cmn++] = output_ptr[index_mn];
          }
        }
        // temp_mk = grad_output block * kernel block^T
        GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
          actual_mc, actual_kc, actual_nc, ALPHA,
          temp_mn, actual_nc, temp_kn, actual_nc,
          BETA, temp_mk, actual_kc);
        // scatter-accumulate temp_mk back into the input tensor
        int cmk = 0;
        for (int im = 0; im < actual_mc; im += 1) {
          int b = (m + im) / output_cr;
          int cr = (m + im) - b * output_cr;
          int c = cr / output_rows;
          int r = cr - c * output_rows;
          const int cstart = c * col_stride - pc;
          const int rstart = r * row_stride - pr;
          int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
          if (fast_flag) {
            ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
              input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
              in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 1);
          }
          else {
            ACX_FUN_LOAD (load_sub_matrix, spatial) (
              input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
              k, kernel_ri, input_ri, in_channel, idx_mk_base,
              cstart, rstart, input_cols, input_rows, kernel_rows, 1);
          }
#else
          for (int ik = 0; ik < actual_kc; ik += 1) {
            int kc = (k + ik) / kernel_ri;
            int kri = (k + ik) - kc * kernel_ri;
            int kr = kri / in_channel;
            int ki = kri - kr * in_channel;
            int input_col = kc + cstart;
            int input_row = kr + rstart;
            if (input_col < input_cols && input_col >= 0 &&
                input_row < input_rows && input_row >= 0) {
              int input_index = idx_mk_base + input_col * input_ri
                + input_row * in_channel + ki;
              input_ptr[input_index] += temp_mk[cmk];
            }
            cmk++;
          }
#endif
        }
      }
    }
  }
  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode stub: unpack the 16 boxed arguments and forward to the native
 * backward-input implementation. */
CAMLprim value FUN_BYTE (spatial_backward_input) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * Gradient of the spatial convolution with respect to the kernel:
 * kernel_ptr (zeroed first) receives grad_output^T * im2col(input),
 * reordered into the (col, row, in_channel, out_channel) kernel layout.
 * Small problems build the full im2col matrix and use one GEMM; larger
 * ones use the blocked path.  Exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);  // not used below
  int col_in_stride = Long_val(vCol_in_stride);  // not used below
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_ri = kernel_rows * in_channel;
  // centred padding offsets
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;
  // small problems: explicit im2col + one GEMM; the division check guards
  // against int overflow of the product
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
    if (kern2d == NULL) exit(1);
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
    // kern2d = grad_output^T (out_channel x output_crb) * im2col matrix
    GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
      out_channel, kernel_cri, output_crb, ALPHA,
      output_ptr, out_channel, inpt2d, kernel_cri,
      BETA, kern2d, kernel_cri);
    // transpose kern2d (out_channel x kernel_cri) into the kernel layout
    int cnt = 0;
    for (int j = 0; j < kernel_cri; ++j) {
      for (int i = 0; i < out_channel; ++i) {
        kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
      }
    }
    free(inpt2d);
    free(kern2d);
    return Val_unit;
  }
  // blocked path
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE));
#ifdef AVX_PSIZE
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);
  for (int m = 0; m < output_crb; m += mc) {
    int actual_mc = fminf(m + mc, output_crb) - m;
    int idx_mn_base = m * out_channel;
    for (int k = 0; k < kernel_cri; k += kc) {
      int actual_kc = fminf(k + kc, kernel_cri) - k;
      int idx_kn_base = k * out_channel;
      // pre-zero so padded (skipped) taps read as zeros
      memset(temp_mk, 0, mc * kc * sizeof(TYPE));
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      // pack the im2col block for this (m, k) tile
      int cmk = 0;
      for (int im = 0; im < actual_mc; im += 1) {
        int b = (m + im) / output_cr;
        int cr = (m + im) - b * output_cr;
        int c = cr / output_rows;
        int r = cr - c * output_rows;
        const int cstart = c * col_stride - pc;
        const int rstart = r * row_stride - pr;
        const int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
        if (fast_flag) {
          ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
            in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 0);
        }
        else {
          ACX_FUN_LOAD (load_sub_matrix, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
            k, kernel_ri, input_ri, in_channel, idx_mk_base,
            cstart, rstart, input_cols, input_rows, kernel_rows, 0);
        }
#else
        for (int ik = 0; ik < actual_kc; ik += 1) {
          int kc = (k + ik) / kernel_ri;
          int kri = (k + ik) - kc * kernel_ri;
          int kr = kri / in_channel;
          int ki = kri - kr * in_channel;
          int input_col = kc + cstart;
          int input_row = kr + rstart;
          if (input_col < input_cols && input_col >= 0 &&
              input_row < input_rows && input_row >= 0) {
            int input_index = idx_mk_base + input_col * input_ri
              + input_row * in_channel + ki;
            temp_mk[cmk] = input_ptr[input_index];
          }
          cmk++;
        }
#endif
      }
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        // NOTE(review): `+=` accumulates n across iterations (and
        // idx_mn_base also across k iterations); for more than one n or k
        // block this differs from base + n — confirm intended.
        idx_mn_base += n;
        idx_kn_base += n;
        // pack the gradient-output block
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = idx_mn_base + ix * out_channel + iy;
            temp_mn[cmn++] = output_ptr[index_mn];
          }
        }
        memset(temp_kn, 0, nc * kc * sizeof(TYPE));
        // temp_kn = grad_output block^T * im2col block
        GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
          actual_nc, actual_kc, actual_mc, ALPHA,
          temp_mn, actual_nc, temp_mk, actual_kc,
          BETA, temp_kn, actual_kc);
        // NOTE(review): `=` overwrites the kernel slot for each m block;
        // when output_crb spans several mc blocks the earlier partial
        // products appear to be discarded — confirm whether `+=` was
        // intended here.
        int cnk = 0;
        for (int jn = 0; jn < actual_nc; jn++) {
          for (int ik = 0; ik < actual_kc; ik++) {
            int index_kn = idx_kn_base + ik * out_channel + jn;
            kernel_ptr[index_kn] = temp_kn[cnk++];
          }
        }
      }
    }
  }
  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode stub: unpack the 16 boxed arguments and forward to the native
 * backward-kernel implementation. */
CAMLprim value FUN_BYTE (spatial_backward_kernel) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
* im2col implementation
*/
/*
 * Plain im2col forward convolution: materialise the full im2col matrix
 * (output_crb x kernel_cri) — unlike the blocked path above, regardless of
 * its size — then compute the result with a single GEMM against the kernel
 * matrix.  The im2col fill is parallelised with OpenMP when available
 * (rows are independent).  Exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);  // not used below
  int col_in_stride = Long_val(vCol_in_stride);  // not used below
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  // padding == 1 means no padding; otherwise compute centred padding offsets
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    // decompose the im2col row index i into (batch, out-col, out-row)
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          // out-of-range taps stay zero (calloc) == zero padding
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_crb, out_channel, kernel_cri, ALPHA,
    inpt2d, kernel_cri, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpack the 17 boxed arguments and forward to the native
 * im2col convolution. */
CAMLprim value FUN_BYTE (spatial_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16]
  );
}
/*
 * Kernel gradient via the plain im2col route: build the full im2col matrix
 * (parallel fill under OpenMP), multiply grad_output^T against it with one
 * GEMM, then transpose the (out_channel x kernel_cri) result into the
 * kernel's (col, row, in_channel, out_channel) layout.
 * kernel_ptr is zeroed before being written.  Exits on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);  // not used below
  int col_in_stride = Long_val(vCol_in_stride);  // not used below
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  // centred padding offsets
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          // out-of-range taps stay zero (calloc) == zero padding
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  // kern2d = grad_output^T (out_channel x output_crb) * im2col matrix
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_cri, output_crb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_cri,
    BETA, kern2d, kernel_cri);
  // transpose into the kernel layout (out_channel becomes the fastest axis)
  int cnt = 0;
  for (int j = 0; j < kernel_cri; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode stub: unpack the 16 boxed arguments and forward to the native
 * im2col backward-kernel implementation. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * Input gradient via the plain im2col route: one GEMM produces the col2im
 * buffer grad_output * kernel^T (output_crb x kernel_cri), which is then
 * scatter-accumulated back into input_ptr (zeroed first).  The scatter loop
 * is not parallelised: overlapping convolution windows may accumulate into
 * the same input element.  Exits the process on allocation failure.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);  // not used below
  int col_in_stride = Long_val(vCol_in_stride);  // not used below
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  // centred padding offsets
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  // inpt2d = grad_output (output_crb x out_channel) * kernel^T
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_crb, kernel_cri, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_cri);
  for (int i = 0; i < output_crb; ++i) {
    // decompose the col2im row index i into (batch, out-col, out-row)
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          // out-of-range taps (padding) contribute nothing
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
          }
          ++cnt;
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode-interpreter entry point (> 5 args): unpack the boxed argv
 * array and delegate to the 16-argument native stub. */
CAMLprim value FUN_BYTE (spatial_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 3-D (cuboid) convolution, forward pass, via im2col + one GEMM.
 *
 * Each output location (batch, col, row, depth) is unpacked into one row
 * of the patch matrix inpt2d [output_drcb x kernel_idrc]; a single
 * row-major GEMM against the kernel matrix [kernel_idrc x out_channel]
 * then writes the result straight into output_ptr.  Out-of-range taps
 * stay zero because inpt2d comes from calloc.  TYPE / GEMM / ALPHA /
 * BETA / INIT are supplied by the including template file.
 * vPadding != 1 selects SAME-style padding (offsets pc/pr/pd below),
 * otherwise VALID (all offsets zero).
 */
CAMLprim value FUN_NATIVE (cuboid_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
/* Flattened strides; input appears laid out as
 * [batch][col][row][dpt][channel] (channel fastest) — confirm vs caller. */
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1); /* OOM; NOTE(review): exits whole runtime */
memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));
INIT;
/* SAME padding: centre the kernel window; negative pad clamps to 0. */
int pd = 0, pr = 0, pc = 0;
if (padding != 1) {
pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
/* i decodes as (bt, j=col, k=row, d=depth); iterations are independent,
 * hence the parallel for. */
for (int i = 0; i < output_drcb; ++i) {
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
/* Copy one kernel-sized patch into row i of inpt2d; cnt advances even
 * for out-of-bounds taps so patch layout stays fixed. */
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
/* output[output_drcb x out_channel] = inpt2d . kernel */
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
output_drcb, out_channel, kernel_idrc, ALPHA,
inpt2d, kernel_idrc, kernel_ptr, out_channel,
BETA, output_ptr, out_channel);
free(inpt2d);
return Val_unit;
}
/* Bytecode-interpreter entry point (> 5 args): unpack the boxed argv
 * array and delegate to the 19-argument native stub. */
CAMLprim value FUN_BYTE (cuboid_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16], argv[17], argv[18]
  );
}
/*
 * 3-D convolution backward pass w.r.t. the KERNEL, via im2col.
 *
 * Rebuilds the same patch matrix inpt2d as the forward pass, then one
 * transposed GEMM of the output gradient (output_ptr) against inpt2d
 * yields kern2d [out_channel x kernel_idrc]; the final loop transposes
 * kern2d into kernel_ptr's [kernel_idrc x out_channel] layout.
 * Padding here is always computed SAME-style (no vPadding argument).
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
INIT;
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1); /* OOM; NOTE(review): exits whole runtime */
TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1); /* NOTE(review): leaks inpt2d on this path */
/* kernel_cols * kernel_rdio == full kernel size; the transpose loop at
 * the end overwrites every element, so this clear is belt-and-braces. */
memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
/* SAME padding offsets, clamped at zero. */
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
/* Identical patch extraction to the forward im2col pass. */
for (int i = 0; i < output_drcb; ++i) {
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
/* kern2d[out_channel x kernel_idrc] = output^T . inpt2d */
GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_idrc, output_drcb, ALPHA,
output_ptr, out_channel, inpt2d, kernel_idrc,
BETA, kern2d, kernel_idrc);
/* Transpose kern2d into the kernel's native layout. */
int cnt = 0;
for (int j = 0; j < kernel_idrc; ++j) {
for (int i = 0; i < out_channel; ++i) {
kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
}
}
free(inpt2d);
free(kern2d);
return Val_unit;
}
/* Bytecode-interpreter entry point (> 5 args): unpack the boxed argv
 * array and delegate to the 18-argument native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16], argv[17]
  );
}
/*
 * 3-D convolution backward pass w.r.t. the INPUT, via im2col.
 *
 * One GEMM expands the output gradient by the transposed kernel into
 * per-patch gradients (inpt2d); the loop below then scatter-ADDS each
 * patch back into input_ptr (overlapping windows accumulate, hence the
 * memset of the input buffer first and the += in the inner loop).
 * The scatter is intentionally serial: two patches may touch the same
 * input cell.
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1); /* OOM; NOTE(review): exits whole runtime */
memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
INIT;
/* SAME padding offsets, clamped at zero. */
int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
/* inpt2d[output_drcb x kernel_idrc] = output . kernel^T */
GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
output_drcb, kernel_idrc, out_channel, ALPHA,
output_ptr, out_channel, kernel_ptr, out_channel,
BETA, inpt2d, kernel_idrc);
for (int i = 0; i < output_drcb; ++i) {
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
}
++cnt;
}
}
}
}
}
free(inpt2d);
return Val_unit;
}
/* Bytecode-interpreter entry point (> 5 args): unpack the boxed argv
 * array and delegate to the 18-argument native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16], argv[17]
  );
}
/*
 * Memory-efficient convolution (MEC) variants of the im2col routines
 * above: the unpacked patch matrix is shared between overlapping kernel
 * windows and consumed by one GEMM per output row, keeping it much
 * smaller than a full im2col expansion.
 */
/*
 * 2-D convolution, forward pass, memory-efficient (MEC) variant.
 *
 * Instead of one patch row per output location, inpt2d is built
 * column-major over a full padded row window
 * (inpt2d_cols = padded_input_rows * kernel_cols * in_channel) per
 * (batch, output column).  One GEMM per output row then consumes an
 * overlapping window (offset inpt2d_step * i) of that matrix, and the
 * final loop transposes output2d back into output_ptr's layout.
 * The kernel is first repacked into kern2d, column-major
 * [kernel_cri x out_channel].
 */
CAMLprim value FUN_NATIVE (spatial_mec) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride); /* unused in this routine */
int col_in_stride = Long_val(vCol_in_stride); /* unused in this routine */
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = input_rows * in_channel;
const int output_cri = out_channel * output_rows * output_cols;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int kernel_rio = kernel_rows * in_channel * out_channel;
const int kernel_io = in_channel * out_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bco = out_channel * output_cols * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
const int inpt2d_rows = batches * output_cols;
const int inpt2d_step = inpt2d_rows * kernel_cols * in_channel * row_stride;
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1); /* OOM; NOTE(review): exits whole runtime */
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(batches * output_cri, sizeof(TYPE));
if (output2d == NULL) exit(1);
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
INIT;
/* SAME padding offsets only when vPadding != 1. */
int pr = 0, pc = 0;
if (padding != 1) {
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
/* Gather the kernel into kern2d, columns indexed by out_channel. */
int cnt = 0;
int kidx = 0;
for (int o = 0; o < out_channel; ++o) {
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int i = 0; i < in_channel; ++i) {
kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
kern2d[cnt++] = kernel_ptr[kidx];
}
}
}
}
/* Fill inpt2d: one column strip per (batch, output column); rows cover
 * the whole padded row window.  Out-of-bounds taps stay zero (calloc). */
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / output_cols;
int c = i % output_cols;
const int cstart = c * col_stride - pc;
const int cend = cstart + kernel_cols;
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
int counter = 0;
for (int a = rstart; a < rend; ++a) {
for (int b = cstart; b < cend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (b < input_cols && b >= 0 &&
a < input_rows && a >= 0) {
int input_idx = bt * input_cri + b * input_ri + a * in_channel + h;
inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
}
counter++;
}
}
}
}
/* One GEMM per output row on the sliding window of inpt2d. */
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
inpt2d_rows, out_channel, kernel_cri, ALPHA,
inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_cri,
BETA, output2d + output_bco * i, inpt2d_rows);
}
/* Transpose output2d back into the output buffer's layout. */
cnt = 0;
for (int j = 0; j < inpt2d_rows; ++j) {
for (int i = 0; i < output_rows * out_channel; ++i) {
output_ptr[cnt++] = output2d[i * inpt2d_rows + j];
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
/* Bytecode-interpreter entry point (> 5 args): unpack the boxed argv
 * array and delegate to the 17-argument native stub. */
CAMLprim value FUN_BYTE (spatial_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16]
  );
}
/*
 * 2-D convolution backward pass w.r.t. the KERNEL, MEC variant.
 *
 * Builds the column-major MEC patch matrix inpt2d once, repacks the
 * output gradient into output2d (column-major, one column per
 * (batch, output column)), then runs one GEMM per output row over the
 * sliding window of inpt2d; the GEMM's beta argument is ALPHA (not
 * BETA) so the per-row products ACCUMULATE into kern2d.  Finally
 * kern2d is scattered back into kernel_ptr's (col, row, in, out)
 * layout.  Padding is always SAME-style here.
 *
 * Cleanup vs the previous revision: removed dead locals output_ri and
 * output_cr, which were computed but never read.
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_mec) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
/* Parsed for API symmetry with the siblings; dilation is not
 * implemented in this routine, so these are currently unused. */
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_ro = output_rows * out_channel;
const int output_crb = output_rows * output_cols * batches;
const int kernel_io = in_channel * out_channel;
const int kernel_rio = kernel_rows * in_channel * out_channel;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bco = out_channel * output_cols * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
const int inpt2d_rows = batches * output_cols;
const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1); /* OOM; NOTE(review): exits whole runtime */
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
if (output2d == NULL) exit(1);
/* Zero the kernel gradient before accumulation/scatter. */
memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
INIT;
/* SAME padding offsets, clamped at zero. */
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
/* Fill inpt2d: one column per (batch, output column) covering the
 * whole padded row window; out-of-bounds taps stay zero (calloc). */
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / output_cols;
int c = i % output_cols;
const int cstart = c * col_stride - pc;
const int cend = cstart + kernel_cols;
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
int counter = 0;
for (int a = rstart; a < rend; ++a) {
for (int b = cstart; b < cend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (b < input_cols && b >= 0 &&
a < input_rows && a >= 0) {
int input_idx =
bt * input_cri + b * input_ri + a * in_channel + h;
inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
}
counter++;
}
}
}
}
/* Repack the output gradient column-major to match inpt2d. */
int cnt = 0;
for (int j = 0; j < inpt2d_rows; ++j) {
for (int i = 0; i < output_ro; ++i) {
output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
}
}
/* kern2d += output2d(row window)^T . inpt2d(row window), per output
 * row; note beta == ALPHA so rows accumulate rather than overwrite. */
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_cri, inpt2d_rows, ALPHA,
output2d + output_bco * i, inpt2d_rows,
inpt2d + inpt2d_step * i, inpt2d_rows,
ALPHA, kern2d, out_channel);
}
/* Scatter kern2d back into the kernel's native layout. */
cnt = 0;
int kidx = 0;
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int i = 0; i < in_channel; ++i) {
for (int o = 0; o < out_channel; ++o) {
kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
kernel_ptr[kidx] = kern2d[cnt++];
}
}
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
/* Bytecode-interpreter entry point (> 5 args): unpack the boxed argv
 * array and delegate to the 16-argument native stub. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 2-D convolution backward pass w.r.t. the INPUT, MEC variant.
 *
 * Repacks the output gradient (output2d) and the kernel (kern2d), then
 * one GEMM per output row expands the gradient into the sliding-window
 * patch matrix inpt2d — note beta == ALPHA so overlapping row windows
 * ACCUMULATE into inpt2d.  The final loop scatter-adds inpt2d back
 * into input_ptr (cleared first).  Padding is always SAME-style here.
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_mec) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride); /* unused in this routine */
int col_in_stride = Long_val(vCol_in_stride); /* unused in this routine */
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_ri = out_channel * output_rows; /* unused */
const int output_cr = output_rows * output_cols; /* unused */
const int output_ro = output_rows * out_channel;
const int output_crb = output_rows * output_cols * batches;
const int kernel_io = in_channel * out_channel;
const int kernel_rio = kernel_rows * in_channel * out_channel;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bco = out_channel * output_cols * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
const int inpt2d_rows = batches * output_cols;
const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1); /* OOM; NOTE(review): exits whole runtime */
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
if (output2d == NULL) exit(1);
memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
INIT;
/* SAME padding offsets, clamped at zero. */
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
/* Repack the output gradient column-major (one column per
 * (batch, output column)). */
int cnt = 0;
for (int j = 0; j < inpt2d_rows; ++j) {
for (int i = 0; i < output_ro; ++i) {
output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
}
}
/* Gather the kernel into kern2d, columns indexed by out_channel. */
cnt = 0;
int kidx = 0;
for (int o = 0; o < out_channel; ++o) {
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int i = 0; i < in_channel; ++i) {
kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
kern2d[cnt++] = kernel_ptr[kidx];
}
}
}
}
/* inpt2d(window i) += output2d(window i) . kern2d^T; beta == ALPHA so
 * overlapping windows accumulate. */
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
inpt2d_rows, kernel_cri, out_channel, ALPHA,
output2d + output_bco * i, inpt2d_rows,
kern2d, kernel_cri, ALPHA,
inpt2d + inpt2d_step * i, inpt2d_rows);
}
/* Scatter-add patch gradients back into the input buffer. */
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / output_cols;
int c = i % output_cols;
const int cstart = c * col_stride - pc;
const int cend = cstart + kernel_cols;
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
const int input_idx_base = bt * input_cri;
int counter = 0;
for (int a = rstart; a < rend; ++a) {
for (int b = cstart; b < cend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (b < input_cols && b >= 0 &&
a < input_rows && a >= 0) {
int input_idx = input_idx_base + b * input_ri + a * in_channel + h;
input_ptr[input_idx] += inpt2d[counter * inpt2d_rows + i];
}
counter++;
}
}
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
/* Bytecode-interpreter entry point (> 5 args): unpack the boxed argv
 * array and delegate to the 16-argument native stub. */
CAMLprim value FUN_BYTE (spatial_backward_input_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/*
 * 3-D (cuboid) convolution, forward pass, memory-efficient (MEC)
 * variant.
 *
 * inpt2d is built column-major with one column per
 * (batch, output column, output depth), rows covering the whole padded
 * row window; one GEMM per output ROW then consumes an overlapping
 * window (offset inpt2d_step * i), and the final loop scatters
 * output2d back into output_ptr's layout.  The kernel is first
 * repacked into kern2d, column-major [kernel_idrc x out_channel].
 * vPadding != 1 selects SAME-style padding, otherwise VALID.
 */
CAMLprim value FUN_NATIVE (cuboid_mec) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_rdo = out_channel * output_dpts * output_rows;
const int output_dr = output_dpts * output_rows; /* unused */
const int output_drc = output_dpts * output_rows * output_cols; /* unused */
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
const int kernel_dio = kernel_dpts * in_channel * out_channel;
const int kernel_io = in_channel * out_channel;
const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
const int output_bcdo = out_channel * output_cols * output_dpts * batches;
const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
const int inpt2d_rows = batches * output_cols * output_dpts;
const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
INIT;
/* SAME padding offsets only when vPadding != 1; clamped at zero. */
int pd = 0, pr = 0, pc = 0;
if (padding != 1) {
pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
}
TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
if (inpt2d == NULL) exit(1); /* OOM; NOTE(review): exits whole runtime */
TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
if (output2d == NULL) exit(1);
/* output_drcb * out_channel == batches * output_crdo: full output. */
memset(output_ptr, 0, output_drcb * out_channel * sizeof(TYPE));
/* Gather the kernel into kern2d, columns indexed by out_channel. */
int cnt = 0;
int kidx = 0;
for (int o = 0; o < out_channel; ++o) {
for (int r = 0; r < kernel_rows; ++r) {
for (int c = 0; c < kernel_cols; ++c) {
for (int d = 0; d < kernel_dpts; ++d) {
for (int i = 0; i < in_channel; ++i) {
kidx = c * kernel_rdio + r * kernel_dio +
d * kernel_io + i * out_channel + o;
kern2d[cnt++] = kernel_ptr[kidx];
}
}
}
}
}
/* Row window is the same for every column of inpt2d. */
const int rstart = 0 - pr;
const int rend = rstart + padded_input_rows;
for (int i = 0; i < inpt2d_rows; ++i) {
int bt = i / (output_cols * output_dpts);
int cd = i % (output_cols * output_dpts);
int ct = cd / output_dpts;
int dt = cd % output_dpts;
const int cstart = ct * col_stride - pc;
const int dstart = dt * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0; /* shadows outer cnt deliberately; local tap counter */
for (int r = rstart; r < rend; ++r) {
for (int c = cstart; c < cend; ++c) {
for (int d = dstart; d < dend; ++d) {
for (int h = 0; h < in_channel; ++h) {
if (c >= 0 && c < input_cols &&
r >= 0 && r < input_rows &&
d >= 0 && d < input_dpts) {
int input_idx = input_idx_base + c * input_rdi +
r * input_di + d * in_channel + h;
/* Each (cnt, i) slot is written at most once, so += on the
 * calloc'ed buffer behaves like plain assignment. */
inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
/* One GEMM per output row on the sliding window of inpt2d. */
for (int i = 0; i < output_rows; ++i) {
GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
inpt2d_rows, out_channel, kernel_idrc, ALPHA,
inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_idrc,
BETA, output2d + output_bcdo * i, inpt2d_rows);
}
/* Scatter output2d back into the output buffer's native layout. */
cnt = 0;
int oidx = 0;
for (int r = 0; r < output_rows; ++r) {
for (int o = 0; o < out_channel; ++o) {
for (int b = 0; b < batches; ++b) {
for (int c = 0; c < output_cols; ++c) {
for (int d = 0; d < output_dpts; ++d) {
oidx = b * output_crdo + c * output_rdo +
r * output_dpts * out_channel + d * out_channel + o;
output_ptr[oidx] = output2d[cnt++];
}
}
}
}
}
free(inpt2d);
free(kern2d);
free(output2d);
return Val_unit;
}
/* Bytecode-interpreter entry point (> 5 args): unpack the boxed argv
 * array and delegate to the 19-argument native stub. */
CAMLprim value FUN_BYTE (cuboid_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],
    argv[4],  argv[5],  argv[6],  argv[7],
    argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15],
    argv[16], argv[17], argv[18]
  );
}
/* Kernel gradient of the 3D (cuboid) convolution, "mec" variant
 * (presumably MEC: memory-efficient convolution -- TODO confirm).
 * vOutput holds the incoming output gradient; the kernel gradient is
 * written into vKernel (vInput is read-only here).
 * Strategy: lower the input into a patch matrix (inpt2d), reorder the
 * output gradient (output2d), multiply the two with one GEMM per output
 * row (accumulating across rows via beta = ALPHA), then scatter the flat
 * result (kern2d) back into the kernel tensor layout.
 * Layouts derived from the index arithmetic below:
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flattened strides of the input, output and kernel tensors. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  /* NOTE(review): output_dr and output_drc are never used below. */
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  /* Extent of (implicitly zero-padded) input rows touched by all output rows. */
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  /* inpt2d is a column-major inpt2d_rows x inpt2d_cols patch matrix;
   * inpt2d_step advances it by one output row (row_stride input rows). */
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  /* NOTE(review): exit(1) on allocation failure aborts the whole process;
   * consistent with the other stubs in this file but harsh for a library. */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  /* Zero the destination kernel gradient before writing into it. */
  memset(kernel_ptr, 0, kernel_idrc * out_channel * sizeof(TYPE));
  INIT;
  /* SAME-style padding offsets, clamped to 0 (the VALID case). */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  int cnt;
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  /* Lowering: for each output (batch, col, dpt) position i, gather every
   * (row, col, dpt, channel) tap into column cnt of inpt2d.  Out-of-range
   * taps are skipped and stay zero (buffer comes from calloc). */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
              r >= 0 && r < input_rows &&
              d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                r * input_di + d * in_channel + h;
              inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  cnt = 0;
  int oidx = 0;
  /* Reorder the output gradient: output2d is grouped by output row, each
   * group laid out as [out_channel][batch][col][dpt] (column-major view). */
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
              r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }
  /* One GEMM per output row; beta = ALPHA accumulates each row's partial
   * out_channel x kernel_idrc gradient into kern2d. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
      out_channel, kernel_idrc, inpt2d_rows, ALPHA,
      output2d + output_bcdo * i, inpt2d_rows,
      inpt2d + inpt2d_step * i, inpt2d_rows,
      ALPHA, kern2d, out_channel);
  }
  cnt = 0;
  int kidx = 0;
  /* Scatter the flat gradient back into the kernel tensor layout. */
  for (int r = 0; r < kernel_rows; ++r) {
    for (int c = 0; c < kernel_cols; ++c) {
      for (int d = 0; d < kernel_dpts; ++d) {
        for (int i = 0; i < in_channel; ++i) {
          for (int o = 0; o < out_channel; ++o) {
            kidx = c * kernel_rdio + r * kernel_dio +
              d * kernel_io + i * out_channel + o;
            kernel_ptr[kidx] = kern2d[cnt++];
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the 18-element argv array and forwards to the
 * native implementation (required because the function takes more than
 * 5 arguments). */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17]
  );
}
/* Input gradient of the 3D (cuboid) convolution, "mec" variant
 * (presumably MEC: memory-efficient convolution -- TODO confirm).
 * vOutput holds the incoming output gradient; the input gradient is
 * written into vInput (vKernel is read-only here).
 * Strategy: reorder the output gradient (output2d) and flatten the
 * kernel (kern2d), compute the lowered input gradient with one GEMM per
 * output row (accumulating into overlapping rows via beta = ALPHA),
 * then scatter-add the patch matrix (inpt2d) back into the input.
 * Layouts derived from the index arithmetic below:
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flattened strides of the input, output and kernel tensors. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  /* NOTE(review): output_dr and output_drc are never used below. */
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  /* Extent of (implicitly zero-padded) input rows touched by all output rows. */
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  /* inpt2d is a column-major inpt2d_rows x inpt2d_cols patch matrix;
   * inpt2d_step advances it by one output row (row_stride input rows). */
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  /* NOTE(review): exit(1) on allocation failure aborts the whole process. */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  /* Zero the destination input gradient before scatter-adding into it. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* SAME-style padding offsets, clamped to 0 (the VALID case). */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  int cnt = 0;
  int oidx = 0;
  /* Reorder the output gradient: output2d is grouped by output row, each
   * group laid out as [out_channel][batch][col][dpt] (column-major view). */
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
              r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }
  cnt = 0;
  int kidx = 0;
  /* Flatten the kernel: kern2d is kernel_idrc x out_channel, one column
   * per output channel. */
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int d = 0; d < kernel_dpts; ++d) {
          for (int i = 0; i < in_channel; ++i) {
            kidx = c * kernel_rdio + r * kernel_dio +
              d * kernel_io + i * out_channel + o;
            kern2d[cnt++] = kernel_ptr[kidx];
          }
        }
      }
    }
  }
  /* One GEMM per output row; beta = ALPHA accumulates into the patch
   * matrix, whose per-row slices overlap when row_stride < kernel_rows. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
      inpt2d_rows, kernel_idrc, out_channel, ALPHA,
      output2d + output_bcdo * i, inpt2d_rows,
      kern2d, kernel_idrc, ALPHA,
      inpt2d + inpt2d_step * i, inpt2d_rows);
  }
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  /* Reverse lowering: scatter-add each patch column back into the input
   * gradient, skipping taps that fall in the zero-padding region. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
              r >= 0 && r < input_rows &&
              d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                r * input_di + d * in_channel + h;
              input_ptr[input_idx] += inpt2d[cnt * inpt2d_rows + i];
            }
            ++cnt;
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the 18-element argv array and forwards to the
 * native implementation. */
CAMLprim value FUN_BYTE (cuboid_backward_input_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_mec) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17]
  );
}
/*
 * Naive (direct, loop-based) implementations of the 2D (spatial) and
 * 3D (cuboid) convolutions and their gradients.
 */
/* Forward pass of the 2D (spatial) convolution, naive direct version:
 * for every output element, walk the kernel window and accumulate
 * input * kernel products, treating out-of-range taps as zero.
 * Layouts derived from the index arithmetic below:
 *   input  : [batch][col][row][in_channel]
 *   kernel : [col][row][in_channel][out_channel]
 *   output : [batch][col][row][out_channel]
 * vPadding selects the padding mode: padding == 1 skips the offset
 * computation (presumably VALID), otherwise SAME-style offsets are used.
 */
CAMLprim value FUN_NATIVE (spatial_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* NOTE(review): the dilation arguments (row_in_stride, col_in_stride)
   * are decoded but never used -- this naive version does not dilate. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  /* NOTE(review): output_cr, output_crb, kernel_cri and ksize are unused. */
  const int output_cr = output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int ksize = kernel_cols * kernel_rows;
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  /* SAME-style padding offsets, clamped to 0. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base = i * output_cri + j * output_ri + k * out_channel;
        /* Kernel window [cstart, cend) x [rstart, rend) in input coords. */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          TYPE sum = 0.;
          for (int h = 0; h < in_channel; ++h) {
            TYPE input_val, kernel_val;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* Taps outside the input contribute zero (padding). */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    input_idx_base + a * input_ri + b * in_channel + h;
                  input_val = *(input_ptr + input_idx);
                } else {
                  input_val = 0.;
                }
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                kernel_val = *(kernel_ptr + kernel_index);
                sum += input_val * kernel_val;
              }
            }
          }
          int output_idx = output_idx_base + l;
          *(output_ptr + output_idx) = sum;
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the 17-element argv array and forwards to the
 * native implementation. */
CAMLprim value FUN_BYTE (spatial_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_naive) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16]
  );
}
/* Kernel gradient of the 2D (spatial) convolution, naive direct version:
 * for every output element, scatter-add output_grad * input into the
 * corresponding kernel taps.  vOutput_ptr holds the incoming gradient;
 * the result is accumulated into vKernel_ptr (zeroed first).
 * Layouts match spatial_naive:
 *   input  : [batch][col][row][in_channel]
 *   kernel : [col][row][in_channel][out_channel]
 *   output : [batch][col][row][out_channel]
 * Padding offsets are always computed SAME-style here (no padding flag).
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* NOTE(review): the dilation arguments are decoded but never used. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  /* Zero the kernel gradient before accumulating. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;
  /* SAME-style padding offsets, clamped to 0. */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  for (int i = 0; i < batches; ++i) {
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        /* Kernel window in input coordinates for this output position. */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          int output_idx =
            i * output_cri + j * output_ri + k * out_channel + l;
          TYPE output_val = *(output_ptr + output_idx);
          for (int h = 0; h < in_channel; ++h) {
            TYPE input_val = 0.;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* Padding taps contribute zero. */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    i * input_cri + a * input_ri + b * in_channel + h;
                  input_val = *(input_ptr + input_idx);
                } else {
                  input_val = 0.;
                }
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                *(kernel_ptr + kernel_index) += output_val * input_val;
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the 16-element argv array and forwards to the
 * native implementation. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_naive) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/* Input gradient of the 2D (spatial) convolution, naive direct version:
 * for every output element, scatter-add output_grad * kernel into the
 * corresponding input taps.  vOutput_ptr holds the incoming gradient;
 * the result is accumulated into vInput_ptr (zeroed first).
 * Layouts match spatial_naive:
 *   input  : [batch][col][row][in_channel]
 *   kernel : [col][row][in_channel][out_channel]
 *   output : [batch][col][row][out_channel]
 */
CAMLprim value FUN_NATIVE (spatial_backward_input_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* NOTE(review): the dilation arguments are decoded but never used. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  /* Zero the input gradient before accumulating. */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  /* SAME-style padding offsets, clamped to 0. */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  for (int i = 0; i < batches; ++i) {
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        /* Kernel window in input coordinates for this output position. */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          int output_idx =
            i * output_cri + j * output_ri + k * out_channel + l;
          TYPE output_val = *(output_ptr + output_idx);
          for (int h = 0; h < in_channel; ++h) {
            TYPE kernel_val = 0.;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                kernel_val = *(kernel_ptr + kernel_index);
                /* Taps that fall into the padding region are dropped. */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    i * input_cri + a * input_ri + b * in_channel + h;
                  *(input_ptr + input_idx) += output_val * kernel_val;
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the 16-element argv array and forwards to the
 * native implementation. */
CAMLprim value FUN_BYTE (spatial_backward_input_naive) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_naive) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/* Forward pass of the 3D (cuboid) convolution, naive direct version:
 * for every output element, walk the 3D kernel window and accumulate
 * input * kernel products, treating out-of-range taps as zero.
 * Layouts derived from the index arithmetic below:
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 * vPadding selects the padding mode: padding == 1 skips the offset
 * computation (presumably VALID), otherwise SAME-style offsets are used.
 * Every output element is assigned below, so no memset is required.
 */
CAMLprim value FUN_NATIVE (cuboid_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* Flattened strides of the input, kernel and output tensors. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  INIT;
  /* SAME-style padding offsets, clamped to 0. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* 3D kernel window in input coordinates. */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            TYPE sum = 0.;
            int output_idx = output_idx_base + l;
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    TYPE input_val, kernel_val;
                    /* Padding taps contribute zero. */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      input_val = *(input_ptr + input_idx);
                    } else {
                      input_val = 0.;
                    }
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    sum += input_val * kernel_val;
                  }
                }
              }
            }
            *(output_ptr + output_idx) = sum;
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the 19-element argv array and forwards to the
 * native implementation. */
CAMLprim value FUN_BYTE (cuboid_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_naive) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17], argv[18]
  );
}
/* Kernel gradient of the 3D (cuboid) convolution, naive direct version:
 * for every output element, scatter-add output_grad * input into the
 * corresponding kernel taps.  vOutput holds the incoming gradient; the
 * result is accumulated into vKernel (zeroed first).
 * Layouts match cuboid_naive:
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 * Padding offsets are always computed SAME-style here (no padding flag).
 */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flattened strides of the input, kernel and output tensors. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* Zero the kernel gradient before accumulating. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  INIT;
  /* SAME-style padding offsets, clamped to 0. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* 3D kernel window in input coordinates. */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    /* Padding taps contribute zero. */
                    TYPE input_val = 0.;
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      input_val = *(input_ptr + input_idx);
                    }
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    *(kernel_ptr + kernel_index) += output_val * input_val;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the 18-element argv array and forwards to the
 * native implementation. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_naive) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/* Input gradient of the 3D (cuboid) convolution, naive direct version:
 * for every output element, scatter-add output_grad * kernel into the
 * corresponding input taps.  vOutput holds the incoming gradient; the
 * result is accumulated into vInput (zeroed first).
 * Layouts match cuboid_naive:
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [col][row][dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 */
CAMLprim value FUN_NATIVE (cuboid_backward_input_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flattened strides of the input, kernel and output tensors. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* Zero the input gradient before accumulating. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* SAME-style padding offsets, clamped to 0. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* 3D kernel window in input coordinates. */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              TYPE kernel_val;
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    /* Taps that fall into the padding region are dropped. */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      *(input_ptr + input_idx) += output_val * kernel_val;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode stub: unpacks the 18-element argv array and forwards to the
 * native implementation. */
CAMLprim value FUN_BYTE (cuboid_backward_input_naive) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_naive) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Dilated (atrous) convolution implementations, based on im2col
 * lowering followed by a single GEMM.
 */
/* Forward pass of the dilated 2D (spatial) convolution via im2col:
 * lower every output position's (dilated) receptive field into a row of
 * inpt2d, then compute all outputs with a single GEMM against the
 * kernel matrix.  The lowering loop is parallelised with OpenMP when
 * available.  row_in_stride / col_in_stride are the dilation factors.
 * Layouts derived from the index arithmetic below:
 *   input  : [batch][col][row][in_channel]
 *   kernel : [col][row][in_channel][out_channel] (used as a
 *            kernel_cri x out_channel matrix)
 *   output : [batch][col][row][out_channel]
 * vPadding == 1 skips the offset computation (presumably VALID),
 * otherwise SAME-style offsets are used.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  /* inpt2d: output_crb x kernel_cri patch matrix; out-of-range taps
   * stay zero (calloc).
   * NOTE(review): exit(1) on allocation failure aborts the process. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  /* Effective (dilated) kernel extent. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  /* SAME-style padding offsets (using the dilated extent), clamped to 0. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols_up - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  /* Lowering: one iteration per (batch, output col, output row);
   * iterations are independent, hence the parallel for. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    /* Step by the dilation factor so only kernel taps are sampled. */
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }
  /* Single GEMM: (output_crb x kernel_cri) * (kernel_cri x out_channel). */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_crb, out_channel, kernel_cri, ALPHA,
    inpt2d, kernel_cri, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub for dilated_spatial_im2col: OCaml's bytecode calling
 * convention packs the arguments of an external with more than 5 parameters
 * into an argv array; unpack and forward to the 17-argument native stub. */
CAMLprim value FUN_BYTE (dilated_spatial_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16]
  );
}
/* Gradient of a dilated 2D convolution w.r.t. the kernel, via im2col.
 *
 * Steps:
 *   1. unfold the input into the patch matrix inpt2d
 *      ([output_crb x kernel_cri]); out-of-range taps keep calloc's zero,
 *      which implements zero padding;
 *   2. kern2d = output^T * inpt2d  (GEMM, [out_channel x kernel_cri]);
 *   3. transpose kern2d element-wise into kernel_ptr's own layout.
 *
 * The index arithmetic below implies these layouts (outer to inner):
 *   input_ptr  : [batches][input_cols][input_rows][in_channel]
 *   output_ptr : [output_crb][out_channel]
 *
 * TYPE, INIT, GEMM, ALPHA and BETA are template macros supplied by the
 * including translation unit (float/double instantiation).
 *
 * Fix vs previous revision: removed the unused local `output_ri`.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides derived from the tensor layouts above. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  /* Gradient is accumulated from scratch. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  /* Effective ("up-sampled") kernel extent under input dilation. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  /* Padding offsets derived from strides and dilated kernel, clamped >= 0. */
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
  /* Unfold: each iteration writes a disjoint row of inpt2d, so the loop is
   * safe to parallelise. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    /* Decompose flat index i into (batch bt, output col c, output row r). */
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt; /* out-of-bounds taps stay zero from calloc */
        }
      }
    }
  }
  /* kern2d[oc][k] = sum_i output_ptr[i][oc] * inpt2d[i][k] */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_cri, output_crb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_cri,
    BETA, kern2d, kernel_cri);
  /* Transpose [out_channel x kernel_cri] into kernel_ptr's layout. */
  int cnt = 0;
  for (int j = 0; j < kernel_cri; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the bytecode argv array and forwards to the
 * 16-argument native implementation. */
CAMLprim value FUN_BYTE (dilated_spatial_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/* Gradient of a dilated 2D convolution w.r.t. the input, via col2im.
 *
 * Steps:
 *   1. inpt2d = output * kernel^T  (GEMM, [output_crb x kernel_cri]) —
 *      every output position's gradient expanded over its receptive field;
 *   2. scatter-add ("col2im") each inpt2d row back into input_ptr.
 *
 * The scatter loop intentionally has NO OpenMP pragma: different output
 * positions overlap in the input, so concurrent `+=` on input_ptr would race.
 *
 * Index arithmetic implies input layout [batches][cols][rows][in_channel].
 * TYPE, INIT, GEMM, ALPHA and BETA are template macros supplied by the
 * including translation unit.
 *
 * Fix vs previous revision: removed the unused local `output_ri`.
 */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_input_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides derived from the tensor layouts above. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* Input gradient is accumulated from scratch. */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  /* Effective ("up-sampled") kernel extent under input dilation. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  /* Padding offsets derived from strides and dilated kernel, clamped >= 0. */
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
  /* inpt2d[i][k] = sum_oc output_ptr[i][oc] * kernel_ptr[k][oc] */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_crb, kernel_cri, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_cri);
  /* col2im scatter-add — serial on purpose (overlapping receptive fields). */
  for (int i = 0; i < output_crb; ++i) {
    /* Decompose flat index i into (batch bt, output col c, output row r). */
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
          }
          ++cnt; /* padding taps contribute nothing */
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the bytecode argv array and forwards to the
 * 16-argument native implementation. */
CAMLprim value FUN_BYTE (dilated_spatial_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/* Forward pass of a dilated 3D ("cuboid") convolution via im2col + GEMM:
 *   1. unfold each output position's receptive field into one row of
 *      inpt2d ([output_drcb x kernel_idrc]); out-of-range taps keep
 *      calloc's zero, implementing zero padding;
 *   2. output = inpt2d * kernel  (kernel laid out [kernel_idrc x out_channel]).
 *
 * Index arithmetic implies the layouts (outer to inner):
 *   input  : [batches][cols][rows][dpts][in_channel]
 *   output : [batches][cols][rows][dpts][out_channel]
 *
 * padding == 1 leaves all padding offsets at 0 (valid-style); any other
 * value derives centred offsets from strides and the dilated kernel extent
 * (presumably same-style — confirm against the OCaml caller).
 * TYPE, INIT, GEMM, ALPHA and BETA are template macros supplied by the
 * including translation unit.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  int padding = Long_val(vPadding);
  /* Flattened strides derived from the tensor layouts above. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* Output is written from scratch. */
  memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));
  INIT;
  /* Effective ("up-sampled") kernel extents under input dilation. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  /* Padding offsets along depth / row / column, clamped >= 0. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  /* Unfold: each iteration writes a disjoint row of inpt2d — parallel-safe. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    /* Decompose flat i into (batch bt, col j, row k, depth d). */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt; /* padding taps stay zero from calloc */
          }
        }
      }
    }
  }
  /* output[i][oc] = sum_k inpt2d[i][k] * kernel_ptr[k][oc] */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_drcb, out_channel, kernel_idrc, ALPHA,
    inpt2d, kernel_idrc, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the bytecode argv array and forwards to the
 * 22-argument native implementation. */
CAMLprim value FUN_BYTE (dilated_cuboid_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20], argv[21]
  );
}
/* Gradient of a dilated 3D convolution w.r.t. the kernel, via im2col:
 *   1. unfold the input into inpt2d ([output_drcb x kernel_idrc]);
 *   2. kern2d = output^T * inpt2d  (GEMM, [out_channel x kernel_idrc]);
 *   3. transpose kern2d element-wise into kernel_ptr's layout.
 *
 * Unlike the forward pass there is no padding flag — centred padding
 * offsets are always derived from strides and the dilated kernel extent.
 * Index arithmetic implies input layout [batches][cols][rows][dpts][in_channel].
 * TYPE, INIT, GEMM, ALPHA and BETA are template macros supplied by the
 * including translation unit.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides derived from the tensor layouts above. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  INIT;
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  /* Kernel gradient is accumulated from scratch. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  /* Effective ("up-sampled") kernel extents under input dilation. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  /* Centred padding offsets, clamped >= 0. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* Unfold: each iteration writes a disjoint row of inpt2d — parallel-safe. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    /* Decompose flat i into (batch bt, col j, row k, depth d). */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt; /* padding taps stay zero from calloc */
          }
        }
      }
    }
  }
  /* kern2d[oc][k] = sum_i output_ptr[i][oc] * inpt2d[i][k] */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_idrc, output_drcb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_idrc,
    BETA, kern2d, kernel_idrc);
  /* Transpose [out_channel x kernel_idrc] into kernel_ptr's layout. */
  int cnt = 0;
  for (int j = 0; j < kernel_idrc; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the bytecode argv array and forwards to the
 * 21-argument native implementation. */
CAMLprim value FUN_BYTE (dilated_cuboid_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
/* Gradient of a dilated 3D convolution w.r.t. the input, via col2im:
 *   1. inpt2d = output * kernel^T  (GEMM, [output_drcb x kernel_idrc]);
 *   2. scatter-add each inpt2d row back into input_ptr.
 *
 * The scatter loop intentionally has NO OpenMP pragma: receptive fields of
 * neighbouring output positions overlap in the input, so concurrent `+=`
 * on input_ptr would race.
 * Index arithmetic implies input layout [batches][cols][rows][dpts][in_channel].
 * TYPE, INIT, GEMM, ALPHA and BETA are template macros supplied by the
 * including translation unit.
 */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides derived from the tensor layouts above. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* Input gradient is accumulated from scratch. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* Effective ("up-sampled") kernel extents under input dilation. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  /* Centred padding offsets, clamped >= 0. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* inpt2d[i][k] = sum_oc output_ptr[i][oc] * kernel_ptr[k][oc] */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_drcb, kernel_idrc, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_idrc);
  /* col2im scatter-add — serial on purpose (overlapping receptive fields). */
  for (int i = 0; i < output_drcb; ++i) {
    /* Decompose flat i into (batch bt, col j, row k, depth d). */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
            }
            ++cnt; /* padding taps contribute nothing */
          }
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpacks the bytecode argv array and forwards to the
 * 21-argument native implementation. */
CAMLprim value FUN_BYTE (dilated_cuboid_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
#endif /* OWL_ENABLE_TEMPLATE */
|
par_coarsen.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
*****************************************************************************/
/* following should be in a header file */
#include "_hypre_parcsr_ls.h"
/*==========================================================================*/
/*==========================================================================*/
/**
Selects a coarse "grid" based on the graph of a matrix.
Notes:
\begin{itemize}
\item The underlying matrix storage scheme is a hypre_ParCSR matrix.
\item The routine returns the following:
\begin{itemize}
\item S - a ParCSR matrix representing the "strength matrix". This is
used in the "build interpolation" routine.
\item CF\_marker - an array indicating both C-pts (value = 1) and
F-pts (value = -1)
\end{itemize}
\item We define the following temporary storage:
\begin{itemize}
\item measure\_array - an array containing the "measures" for each
of the fine-grid points
\item graph\_array - an array containing the list of points in the
"current subgraph" being considered in the coarsening process.
\end{itemize}
\item The graph of the "strength matrix" for A is a subgraph of the
graph of A, but requires nonsymmetric storage even if A is
symmetric. This is because of the directional nature of the
"strengh of dependence" notion (see below). Since we are using
nonsymmetric storage for A right now, this is not a problem. If we
ever add the ability to store A symmetrically, then we could store
the strength graph as floats instead of doubles to save space.
\item This routine currently "compresses" the strength matrix. We
should consider the possibility of defining this matrix to have the
same "nonzero structure" as A. To do this, we could use the same
A\_i and A\_j arrays, and would need only define the S\_data array.
There are several pros and cons to discuss.
\end{itemize}
Terminology:
\begin{itemize}
\item Ruge's terminology: A point is "strongly connected to" $j$, or
"strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$.
\item Here, we retain some of this terminology, but with a more
generalized notion of "strength". We also retain the "natural"
graph notation for representing the directed graph of a matrix.
That is, the nonzero entry $a_ij$ is represented as: i --> j. In
the strength matrix, S, the entry $s_ij$ is also graphically denoted
as above, and means both of the following:
\begin{itemize}
\item $i$ "depends on" $j$ with "strength" $s_ij$
\item $j$ "influences" $i$ with "strength" $s_ij$
\end{itemize}
\end{itemize}
{\bf Input files:}
_hypre_parcsr_ls.h
@return Error code.
@param A [IN]
coefficient matrix
@param strength_threshold [IN]
threshold parameter used to define strength
@param S_ptr [OUT]
strength matrix
@param CF_marker_ptr [IN/OUT]
array indicating C/F points
@see */
/*--------------------------------------------------------------------------*/
/* CF_marker point classifications used by the coarsening routine below:
 *   C_PT        - coarse point (value 1, per the doc comment above)
 *   F_PT        - fine point (value -1)
 *   SF_PT       - special fine point: rows with no strong connections are
 *                 marked SF_PT and removed from the subgraph (see the
 *                 initialization loop)
 *   COMMON_C_PT - not referenced in the visible portion of this routine;
 *                 presumably a shared C-pt marker - confirm elsewhere
 *   Z_PT        - provisional/zero point, handled specially when CF_init == 1
 */
#define C_PT 1
#define F_PT -1
#define SF_PT -3
#define COMMON_C_PT 2
#define Z_PT -2
HYPRE_Int
hypre_BoomerAMGCoarsen( hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix *A,
HYPRE_Int CF_init,
HYPRE_Int debug_flag,
HYPRE_Int **CF_marker_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = NULL;
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstColDiag(S);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)hypre_CSRMatrixNumCols(S_diag);
HYPRE_Int num_cols_offd = 0;
hypre_CSRMatrix *S_ext;
HYPRE_Int *S_ext_i = NULL;
HYPRE_BigInt *S_ext_j = NULL;
HYPRE_Int num_sends = 0;
HYPRE_Int *int_buf_data;
HYPRE_Real *buf_data;
HYPRE_Int *CF_marker;
HYPRE_Int *CF_marker_offd;
HYPRE_Real *measure_array;
HYPRE_Int *graph_array;
HYPRE_Int *graph_array_offd;
HYPRE_Int graph_size;
HYPRE_BigInt big_graph_size;
HYPRE_Int graph_offd_size;
HYPRE_BigInt global_graph_size;
HYPRE_Int i, j, k, kc, jS, kS, ig, elmt;
HYPRE_Int index, start, my_id, num_procs, jrow, cnt, nnzrow;
HYPRE_Int use_commpkg_A = 0;
HYPRE_Int break_var = 1;
HYPRE_Real wall_time;
HYPRE_Int iter = 0;
HYPRE_BigInt big_k;
#if 0 /* debugging */
char filename[256];
FILE *fp;
HYPRE_Int iter = 0;
#endif
/*--------------------------------------------------------------
* Compute a ParCSR strength matrix, S.
*
* For now, the "strength" of dependence/influence is defined in
* the following way: i depends on j if
* aij > hypre_max (k != i) aik, aii < 0
* or
* aij < hypre_min (k != i) aik, aii >= 0
* Then S_ij = 1, else S_ij = 0.
*
* NOTE: the entries are negative initially, corresponding
* to "unaccounted-for" dependence.
*----------------------------------------------------------------*/
S_ext = NULL;
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
if (!comm_pkg)
{
use_commpkg_A = 1;
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
S_diag_j = hypre_CSRMatrixJ(S_diag);
if (num_cols_offd)
{
S_offd_j = hypre_CSRMatrixJ(S_offd);
}
/*----------------------------------------------------------
* Compute the measures
*
* The measures are currently given by the column sums of S.
* Hence, measure_array[i] is the number of influences
* of variable i.
*
* The measures are augmented by a random number
* between 0 and 1.
*----------------------------------------------------------*/
measure_array = hypre_CTAlloc(HYPRE_Real, num_variables+num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < S_offd_i[num_variables]; i++)
{
measure_array[num_variables + S_offd_j[i]] += 1.0;
}
if (num_procs > 1)
comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg,
&measure_array[num_variables], buf_data);
for (i=0; i < S_diag_i[num_variables]; i++)
{
measure_array[S_diag_j[i]] += 1.0;
}
if (num_procs > 1)
hypre_ParCSRCommHandleDestroy(comm_handle);
index = 0;
for (i=0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]
+= buf_data[index++];
}
for (i=num_variables; i < num_variables+num_cols_offd; i++)
{
measure_array[i] = 0;
}
/* this augments the measures */
if (CF_init == 2)
hypre_BoomerAMGIndepSetInit(S, measure_array, 1);
else
hypre_BoomerAMGIndepSetInit(S, measure_array, 0);
/*---------------------------------------------------
* Initialize the graph array
* graph_array contains interior points in elements 0 ... num_variables-1
* followed by boundary values
*---------------------------------------------------*/
graph_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
if (num_cols_offd)
graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
else
graph_array_offd = NULL;
/* initialize measure array and graph array */
for (ig = 0; ig < num_cols_offd; ig++)
graph_array_offd[ig] = ig;
/*---------------------------------------------------
* Initialize the C/F marker array
* C/F marker array contains interior points in elements 0 ...
* num_variables-1 followed by boundary values
*---------------------------------------------------*/
graph_offd_size = num_cols_offd;
/* Allocate CF_marker if not done before */
if (*CF_marker_ptr == NULL)
{
*CF_marker_ptr = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
}
CF_marker = *CF_marker_ptr;
if (CF_init == 1)
{
cnt = 0;
for (i = 0; i < num_variables; i++)
{
if ( CF_marker[i] != SF_PT )
{
if ( (S_offd_i[i+1] - S_offd_i[i]) > 0 ||
(CF_marker[i] == F_PT) )
{
CF_marker[i] = 0;
}
if ( CF_marker[i] == Z_PT)
{
if ( (S_diag_i[i+1] - S_diag_i[i]) > 0 ||
(measure_array[i] >= 1.0) )
{
CF_marker[i] = 0;
graph_array[cnt++] = i;
}
else
{
CF_marker[i] = F_PT;
}
}
else
{
graph_array[cnt++] = i;
}
}
else
{
measure_array[i] = 0;
}
}
}
else
{
cnt = 0;
for (i = 0; i < num_variables; i++)
{
if ( CF_marker[i] != SF_PT )
{
CF_marker[i] = 0;
nnzrow = (S_diag_i[i+1] - S_diag_i[i]) + (S_offd_i[i+1] - S_offd_i[i]);
if (nnzrow == 0)
{
CF_marker[i] = SF_PT;
measure_array[i] = 0;
}
else
{
graph_array[cnt++] = i;
}
}
else
{
measure_array[i] = 0;
}
}
}
graph_size = cnt;
if (num_cols_offd)
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
else
CF_marker_offd = NULL;
for (i=0; i < num_cols_offd; i++)
CF_marker_offd[i] = 0;
/*---------------------------------------------------
* Loop until all points are either fine or coarse.
*---------------------------------------------------*/
if (num_procs > 1)
{
if (use_commpkg_A)
S_ext = hypre_ParCSRMatrixExtractBExt(S,A,0);
else
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,0);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixBigJ(S_ext);
}
/* compress S_ext and convert column numbers*/
index = 0;
for (i=0; i < num_cols_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
{
big_k = S_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
S_ext_j[index++] = big_k - col_1;
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd);
if (kc > -1) S_ext_j[index++] = (HYPRE_BigInt)(-kc-1);
}
}
S_ext_i[i] = index;
}
for (i = num_cols_offd; i > 0; i--)
S_ext_i[i] = S_ext_i[i-1];
if (num_procs > 1) S_ext_i[0] = 0;
if (debug_flag == 3)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Initialize CLJP phase = %f\n",
my_id, wall_time);
}
while (1)
{
/*------------------------------------------------
* Exchange boundary data, i.e. get measures and S_ext_data
*------------------------------------------------*/
if (num_procs > 1)
comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg,
&measure_array[num_variables], buf_data);
if (num_procs > 1)
hypre_ParCSRCommHandleDestroy(comm_handle);
index = 0;
for (i=0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]
+= buf_data[index++];
}
/*------------------------------------------------
* Set F-pts and update subgraph
*------------------------------------------------*/
if (iter || (CF_init != 1))
{
for (ig = 0; ig < graph_size; ig++)
{
i = graph_array[ig];
if ( (CF_marker[i] != C_PT) && (measure_array[i] < 1) )
{
/* set to be an F-pt */
CF_marker[i] = F_PT;
/* make sure all dependencies have been accounted for */
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
if (S_diag_j[jS] > -1)
{
CF_marker[i] = 0;
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
if (S_offd_j[jS] > -1)
{
CF_marker[i] = 0;
}
}
}
if (CF_marker[i])
{
measure_array[i] = 0;
/* take point out of the subgraph */
graph_size--;
graph_array[ig] = graph_array[graph_size];
graph_array[graph_size] = i;
ig--;
}
}
}
/*------------------------------------------------
* Exchange boundary data, i.e. get measures
*------------------------------------------------*/
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
buf_data[index++] = measure_array[jrow];
}
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data,
&measure_array[num_variables]);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
/*------------------------------------------------
* Debugging:
*
* Uncomment the sections of code labeled
* "debugging" to generate several files that
* can be visualized using the `coarsen.m'
* matlab routine.
*------------------------------------------------*/
#if 0 /* debugging */
/* print out measures */
hypre_sprintf(filename, "coarsen.out.measures.%04d", iter);
fp = fopen(filename, "w");
for (i = 0; i < num_variables; i++)
{
hypre_fprintf(fp, "%f\n", measure_array[i]);
}
fclose(fp);
/* print out strength matrix */
hypre_sprintf(filename, "coarsen.out.strength.%04d", iter);
hypre_CSRMatrixPrint(S, filename);
/* print out C/F marker */
hypre_sprintf(filename, "coarsen.out.CF.%04d", iter);
fp = fopen(filename, "w");
for (i = 0; i < num_variables; i++)
{
hypre_fprintf(fp, "%d\n", CF_marker[i]);
}
fclose(fp);
iter++;
#endif
/*------------------------------------------------
* Test for convergence
*------------------------------------------------*/
big_graph_size = (HYPRE_BigInt) graph_size;
hypre_MPI_Allreduce(&big_graph_size,&global_graph_size,1,HYPRE_MPI_BIG_INT,hypre_MPI_SUM,comm);
if (global_graph_size == 0)
break;
/*------------------------------------------------
* Pick an independent set of points with
* maximal measure.
*------------------------------------------------*/
if (iter || (CF_init != 1))
{
hypre_BoomerAMGIndepSet(S, measure_array, graph_array,
graph_size,
graph_array_offd, graph_offd_size,
CF_marker, CF_marker_offd);
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg,
CF_marker_offd, int_buf_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1);j++) {
elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
if (!int_buf_data[index++] && CF_marker[elmt] > 0)
{
CF_marker[elmt] = 0;
}
}
}
}
iter++;
/*------------------------------------------------
* Exchange boundary data for CF_marker
*------------------------------------------------*/
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
int_buf_data[index++] = CF_marker[elmt];
}
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
for (ig = 0; ig < graph_offd_size; ig++)
{
i = graph_array_offd[ig];
if (CF_marker_offd[i] < 0)
{
/* take point out of the subgraph */
graph_offd_size--;
graph_array_offd[ig] = graph_array_offd[graph_offd_size];
graph_array_offd[graph_offd_size] = i;
ig--;
}
}
if (debug_flag == 3)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d iter %d comm. and subgraph update = %f\n",
my_id, iter, wall_time);
}
/*------------------------------------------------
* Set C_pts and apply heuristics.
*------------------------------------------------*/
for (i=num_variables; i < num_variables+num_cols_offd; i++)
{
measure_array[i] = 0;
}
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
for (ig = 0; ig < graph_size; ig++)
{
i = graph_array[ig];
/*---------------------------------------------
* Heuristic: C-pts don't interpolate from
* neighbors that influence them.
*---------------------------------------------*/
if (CF_marker[i] > 0)
{
/* set to be a C-pt */
CF_marker[i] = C_PT;
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
j = S_diag_j[jS];
if (j > -1)
{
/* "remove" edge from S */
S_diag_j[jS] = -S_diag_j[jS]-1;
/* decrement measures of unmarked neighbors */
if (!CF_marker[j])
{
measure_array[j]--;
}
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
j = S_offd_j[jS];
if (j > -1)
{
/* "remove" edge from S */
S_offd_j[jS] = -S_offd_j[jS]-1;
/* decrement measures of unmarked neighbors */
if (!CF_marker_offd[j])
{
measure_array[j+num_variables]--;
}
}
}
}
else
{
/* marked dependencies */
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
j = S_diag_j[jS];
if (j < 0) j = -j-1;
if (CF_marker[j] > 0)
{
if (S_diag_j[jS] > -1)
{
/* "remove" edge from S */
S_diag_j[jS] = -S_diag_j[jS]-1;
}
/* IMPORTANT: consider all dependencies */
/* temporarily modify CF_marker */
CF_marker[j] = COMMON_C_PT;
}
else if (CF_marker[j] == SF_PT)
{
if (S_diag_j[jS] > -1)
{
/* "remove" edge from S */
S_diag_j[jS] = -S_diag_j[jS]-1;
}
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
j = S_offd_j[jS];
if (j < 0) j = -j-1;
if (CF_marker_offd[j] > 0)
{
if (S_offd_j[jS] > -1)
{
/* "remove" edge from S */
S_offd_j[jS] = -S_offd_j[jS]-1;
}
/* IMPORTANT: consider all dependencies */
/* temporarily modify CF_marker */
CF_marker_offd[j] = COMMON_C_PT;
}
else if (CF_marker_offd[j] == SF_PT)
{
if (S_offd_j[jS] > -1)
{
/* "remove" edge from S */
S_offd_j[jS] = -S_offd_j[jS]-1;
}
}
}
/* unmarked dependencies */
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
if (S_diag_j[jS] > -1)
{
j = S_diag_j[jS];
break_var = 1;
/* check for common C-pt */
for (kS = S_diag_i[j]; kS < S_diag_i[j+1]; kS++)
{
k = S_diag_j[kS];
if (k < 0) k = -k-1;
/* IMPORTANT: consider all dependencies */
if (CF_marker[k] == COMMON_C_PT)
{
/* "remove" edge from S and update measure*/
S_diag_j[jS] = -S_diag_j[jS]-1;
measure_array[j]--;
break_var = 0;
break;
}
}
if (break_var)
{
for (kS = S_offd_i[j]; kS < S_offd_i[j+1]; kS++)
{
k = S_offd_j[kS];
if (k < 0) k = -k-1;
/* IMPORTANT: consider all dependencies */
if ( CF_marker_offd[k] == COMMON_C_PT)
{
/* "remove" edge from S and update measure*/
S_diag_j[jS] = -S_diag_j[jS]-1;
measure_array[j]--;
break;
}
}
}
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
if (S_offd_j[jS] > -1)
{
j = S_offd_j[jS];
/* check for common C-pt */
for (kS = S_ext_i[j]; kS < S_ext_i[j+1]; kS++)
{
k = (HYPRE_Int)S_ext_j[kS];
if (k >= 0)
{
/* IMPORTANT: consider all dependencies */
if (CF_marker[k] == COMMON_C_PT)
{
/* "remove" edge from S and update measure*/
S_offd_j[jS] = -S_offd_j[jS]-1;
measure_array[j+num_variables]--;
break;
}
}
else
{
kc = -k-1;
if (kc > -1 && CF_marker_offd[kc] == COMMON_C_PT)
{
/* "remove" edge from S and update measure*/
S_offd_j[jS] = -S_offd_j[jS]-1;
measure_array[j+num_variables]--;
break;
}
}
}
}
}
}
/* reset CF_marker */
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
j = S_diag_j[jS];
if (j < 0) j = -j-1;
if (CF_marker[j] == COMMON_C_PT)
{
CF_marker[j] = C_PT;
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
j = S_offd_j[jS];
if (j < 0) j = -j-1;
if (CF_marker_offd[j] == COMMON_C_PT)
{
CF_marker_offd[j] = C_PT;
}
}
}
if (debug_flag == 3)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d CLJP phase = %f graph_size = %d nc_offd = %d\n",
my_id, wall_time, graph_size, num_cols_offd);
}
}
/*---------------------------------------------------
* Clean up and return
*---------------------------------------------------*/
/* Reset S_matrix */
for (i=0; i < S_diag_i[num_variables]; i++)
{
if (S_diag_j[i] < 0)
S_diag_j[i] = -S_diag_j[i]-1;
}
for (i=0; i < S_offd_i[num_variables]; i++)
{
if (S_offd_j[i] < 0)
S_offd_j[i] = -S_offd_j[i]-1;
}
/*for (i=0; i < num_variables; i++)
if (CF_marker[i] == SF_PT) CF_marker[i] = F_PT;*/
hypre_TFree(measure_array, HYPRE_MEMORY_HOST);
hypre_TFree(graph_array, HYPRE_MEMORY_HOST);
if (num_cols_offd) hypre_TFree(graph_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
return hypre_error_flag;
}
/*==========================================================================
* Ruge's coarsening algorithm
*==========================================================================*/
#define C_PT 1
#define F_PT -1
#define Z_PT -2
#define SF_PT -3 /* special fine points */
#define SC_PT 3 /* special coarse points */
#define UNDECIDED 0
/**************************************************************
*
* Ruge Coarsening routine
*
**************************************************************/
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenRuge
 *
 * Classical Ruge-Stueben coarsening: a serial first pass driven by a
 * linked list of points ordered by measure, followed by optional
 * boundary-treatment passes selected by coarsen_type.
 *
 * Parameters:
 *   S             - ParCSR strength matrix (only the pattern is used)
 *   A             - original matrix; supplies a comm package when S has
 *                   none and, when cut_factor > 0, its row counts
 *   measure_type  - 1 or 4: add influences from neighbor processors to
 *                   the measures; 3 or 4: isolated (empty-row) points
 *                   become special *coarse* points (SC_PT) instead of
 *                   special fine points (SF_PT)
 *   coarsen_type  - variant selector (negative values are negated first):
 *                     6  -> Falgout first stage: F-points are marked Z_PT,
 *                           then proceeds as type 1
 *                     10 -> like 6 but stops after the first pass (-> 11)
 *                     11 -> return immediately after the first pass
 *                     2,3,4 -> additional second/third passes that examine
 *                           off-processor boundary points
 *                     5  -> special treatment of boundary F-points only
 *   cut_factor    - if > 0, rows of A with more than
 *                   cut_factor * (average nnz per row) are forced to SF_PT
 *   debug_flag    - 3 prints per-phase wall-clock timings
 *   CF_marker_ptr - in/out C/F splitting of size num_variables;
 *                   allocated here when *CF_marker_ptr is NULL
 *
 * Returns hypre_error_flag (but 0 when coarsen_type == 11 — see NOTE at
 * that return).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCoarsenRuge( hypre_ParCSRMatrix *S,
                            hypre_ParCSRMatrix *A,
                            HYPRE_Int measure_type,
                            HYPRE_Int coarsen_type,
                            HYPRE_Int cut_factor,
                            HYPRE_Int debug_flag,
                            HYPRE_Int **CF_marker_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *S_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = NULL;
   HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
   HYPRE_BigInt num_nonzeros = hypre_ParCSRMatrixNumNonzeros(A);
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int avg_nnzrow;
   hypre_CSRMatrix *S_ext = NULL;
   HYPRE_Int *S_ext_i = NULL;
   HYPRE_BigInt *S_ext_j = NULL;
   hypre_CSRMatrix *ST;
   HYPRE_Int *ST_i;          /* row pointers of S^T (influence counts)    */
   HYPRE_Int *ST_j;
   HYPRE_Int *CF_marker;
   HYPRE_Int *CF_marker_offd = NULL;
   /* ci_tilde / ci_tilde_offd remember a *tentatively* promoted C-point
    * made while re-examining point i; the matching *_mark records which i
    * the promotion belongs to, so it can be undone if i itself turns C */
   HYPRE_Int ci_tilde = -1;
   HYPRE_Int ci_tilde_mark = -1;
   HYPRE_Int ci_tilde_offd = -1;
   HYPRE_Int ci_tilde_offd_mark = -1;
   HYPRE_Int *measure_array;
   HYPRE_Int *graph_array;
   HYPRE_Int *int_buf_data = NULL;
   HYPRE_Int *ci_array = NULL;
   HYPRE_BigInt big_k;
   HYPRE_Int i, j, k, jS;
   HYPRE_Int ji, jj, jk, jm, index;
   HYPRE_Int set_empty = 1;
   HYPRE_Int C_i_nonempty = 0;
   HYPRE_Int cut, nnzrow;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int num_sends = 0;
   HYPRE_BigInt first_col;
   HYPRE_Int start;
   HYPRE_BigInt col_0, col_n;
   hypre_LinkList LoL_head;
   hypre_LinkList LoL_tail;
   HYPRE_Int *lists, *where;
   HYPRE_Int measure, new_meas;
   HYPRE_Int meas_type = 0;
   HYPRE_Int agg_2 = 0;
   HYPRE_Int num_left, elmt;
   HYPRE_Int nabor, nabor_two;
   HYPRE_Int use_commpkg_A = 0;
   HYPRE_Int break_var = 0;
   HYPRE_Int f_pnt = F_PT;   /* value used to mark F-points in pass 1 */
   HYPRE_Real wall_time;

   if (coarsen_type < 0)
   {
      coarsen_type = -coarsen_type;
   }
   if (measure_type == 1 || measure_type == 4)
   {
      meas_type = 1;
   }
   if (measure_type == 4 || measure_type == 3)
   {
      agg_2 = 1;
   }

   /*-------------------------------------------------------
    * Initialize the C/F marker, LoL_head, LoL_tail arrays
    *-------------------------------------------------------*/

   LoL_head = NULL;
   LoL_tail = NULL;
   lists = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   where = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);

#if 0 /* debugging */
   char filename[256];
   FILE *fp;
   HYPRE_Int iter = 0;
#endif

   /*--------------------------------------------------------------
    * Compute a CSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik, aii < 0
    * or
    *     aij < hypre_min (k != i) aik, aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   if (debug_flag == 3) wall_time = time_getWallclockSeconds();

   first_col = hypre_ParCSRMatrixFirstColDiag(S);
   col_0 = first_col-1;
   col_n = col_0+(HYPRE_BigInt)num_variables;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   /* fall back to A's comm package when S has none (create it if needed) */
   if (!comm_pkg)
   {
      use_commpkg_A = 1;
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   if (num_cols_offd)
   {
      S_offd_j = hypre_CSRMatrixJ(S_offd);
   }

   jS = S_i[num_variables];

   ST = hypre_CSRMatrixCreate(num_variables, num_variables, jS);
   hypre_CSRMatrixMemoryLocation(ST) = HYPRE_MEMORY_HOST;
   ST_i = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
   ST_j = hypre_CTAlloc(HYPRE_Int, jS, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixI(ST) = ST_i;
   hypre_CSRMatrixJ(ST) = ST_j;

   /*----------------------------------------------------------
    * generate transpose of S, ST
    * (classic CSR transpose: count per column, prefix-sum,
    *  bucket-fill, then shift row pointers back)
    *----------------------------------------------------------*/

   for (i=0; i <= num_variables; i++)
   {
      ST_i[i] = 0;
   }

   for (i=0; i < jS; i++)
   {
      ST_i[S_j[i]+1]++;
   }
   for (i=0; i < num_variables; i++)
   {
      ST_i[i+1] += ST_i[i];
   }
   for (i=0; i < num_variables; i++)
   {
      for (j=S_i[i]; j < S_i[i+1]; j++)
      {
         index = S_j[j];
         ST_j[ST_i[index]] = i;
         ST_i[index]++;
      }
   }
   /* the fill loop advanced every row pointer by one row; restore them */
   for (i = num_variables; i > 0; i--)
   {
      ST_i[i] = ST_i[i-1];
   }
   ST_i[0] = 0;

   /*----------------------------------------------------------
    * Compute the measures
    *
    * The measures are given by the row sums of ST.
    * Hence, measure_array[i] is the number of influences
    * of variable i.
    * correct actual measures through adding influences from
    * neighbor processors
    *----------------------------------------------------------*/

   measure_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);

   for (i = 0; i < num_variables; i++)
   {
      measure_array[i] = ST_i[i+1]-ST_i[i];
   }

   /* special case for Falgout coarsening: F-points of this pass become
    * Z_PT so the subsequent CLJP pass can still reconsider them */
   if (coarsen_type == 6)
   {
      f_pnt = Z_PT;
      coarsen_type = 1;
   }
   if (coarsen_type == 10)
   {
      f_pnt = Z_PT;
      coarsen_type = 11;
   }

   if ((meas_type || (coarsen_type != 1 && coarsen_type != 11)) && num_procs > 1)
   {
      /* external rows of S are needed either to correct the measures or
       * for the boundary passes below */
      if (use_commpkg_A)
         S_ext = hypre_ParCSRMatrixExtractBExt(S,A,0);
      else
         S_ext = hypre_ParCSRMatrixExtractBExt(S,S,0);
      S_ext_i = hypre_CSRMatrixI(S_ext);
      S_ext_j = hypre_CSRMatrixBigJ(S_ext);
      /* NOTE(review): this local HYPRE_Int num_nonzeros shadows the
       * HYPRE_BigInt num_nonzeros declared above (nnz of A); the outer one
       * is still used later for the cut_factor test. Renaming would aid
       * readability — confirm before touching. */
      HYPRE_Int num_nonzeros = S_ext_i[num_cols_offd];
      /*first_col = hypre_ParCSRMatrixFirstColDiag(S);
        col_0 = first_col-1;
        col_n = col_0+num_variables; */
      if (meas_type)
      {
         /* add influences coming from other processors: every external
          * entry whose global column lands in my owned range bumps the
          * corresponding local measure */
         for (i=0; i < num_nonzeros; i++)
         {
            index = (HYPRE_Int)(S_ext_j[i] - first_col);
            if (index > -1 && index < num_variables)
               measure_array[index]++;
         }
      }
   }

   /*---------------------------------------------------
    * Loop until all points are either fine or coarse.
    *---------------------------------------------------*/

   if (debug_flag == 3) wall_time = time_getWallclockSeconds();

   /* first coarsening phase */

   /*************************************************************
    *
    * Initialize the lists
    *
    *************************************************************/

   /* Allocate CF_marker if not done before */
   if (*CF_marker_ptr == NULL)
   {
      *CF_marker_ptr = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   }
   CF_marker = *CF_marker_ptr;

   num_left = 0;
   for (j = 0; j < num_variables; j++)
   {
      if (CF_marker[j] == 0)
      {
         nnzrow = (S_i[j+1] - S_i[j]) + (S_offd_i[j+1] - S_offd_i[j]);
         if (nnzrow == 0)
         {
            /* isolated point: no strong connections at all */
            CF_marker[j] = SF_PT;
            if (agg_2)
            {
               CF_marker[j] = SC_PT;
            }
            measure_array[j] = 0;
         }
         else
         {
            CF_marker[j] = UNDECIDED;
            num_left++;
         }
      }
      else
      {
         /* point was pre-assigned by the caller; exclude from the lists */
         measure_array[j] = 0;
      }
   }

   /* Set dense rows as SF_PT */
   if ((cut_factor > 0) && (global_num_rows > 0))
   {
      avg_nnzrow = num_nonzeros/global_num_rows;
      cut = cut_factor*avg_nnzrow;
      for (j = 0; j < num_variables; j++)
      {
         nnzrow = (A_i[j+1] - A_i[j]) + (A_offd_i[j+1] - A_offd_i[j]);
         if (nnzrow > cut)
         {
            if (CF_marker[j] == UNDECIDED)
            {
               num_left--;
            }
            CF_marker[j] = SF_PT;
         }
      }
   }

   /* enter all undecided points on the lists-of-lists; points with zero
    * measure become F right away and bump their neighbors' measures */
   for (j = 0; j < num_variables; j++)
   {
      measure = measure_array[j];
      if (CF_marker[j] != SF_PT && CF_marker[j] != SC_PT)
      {
         if (measure > 0)
         {
            hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, j, lists, where);
         }
         else
         {
            if (measure < 0)
            {
               hypre_error_w_msg(HYPRE_ERROR_GENERIC,"negative measure!\n");
            }
            CF_marker[j] = f_pnt;
            for (k = S_i[j]; k < S_i[j+1]; k++)
            {
               nabor = S_j[k];
               if (CF_marker[nabor] != SF_PT && CF_marker[nabor] != SC_PT)
               {
                  if (nabor < j)
                  {
                     /* nabor is already on a list: move it up one bucket */
                     new_meas = measure_array[nabor];
                     if (new_meas > 0)
                     {
                        hypre_remove_point(&LoL_head, &LoL_tail, new_meas,
                                           nabor, lists, where);
                     }
                     new_meas = ++(measure_array[nabor]);
                     hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas,
                                          nabor, lists, where);
                  }
                  else
                  {
                     /* nabor > j has not been entered on the lists yet;
                      * just update its measure */
                     new_meas = ++(measure_array[nabor]);
                  }
               }
            }
            --num_left;
         }
      }
   }

   /****************************************************************
    *
    * Main loop of Ruge-Stueben first coloring pass.
    *
    * WHILE there are still points to classify DO:
    *        1) find first point, i, on list with max_measure
    *           make i a C-point, remove it from the lists
    *        2) For each point, j, in S_i^T,
    *           a) Set j to be an F-point
    *           b) For each point, k, in S_j
    *              move k to the list in LoL with measure one
    *              greater than it occupies (creating new LoL
    *              entry if necessary)
    *        3) For each point, j, in S_i,
    *           move j to the list in LoL with measure one
    *           smaller than it occupies (creating new LoL
    *           entry if necessary)
    *
    ****************************************************************/

   while (num_left > 0)
   {
      index = LoL_head -> head;

      CF_marker[index] = C_PT;
      measure = measure_array[index];
      measure_array[index] = 0;
      --num_left;

      hypre_remove_point(&LoL_head, &LoL_tail, measure, index, lists, where);

      for (j = ST_i[index]; j < ST_i[index+1]; j++)
      {
         nabor = ST_j[j];
         if (CF_marker[nabor] == UNDECIDED)
         {
            CF_marker[nabor] = F_PT;
            measure = measure_array[nabor];

            hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where);
            --num_left;

            for (k = S_i[nabor]; k < S_i[nabor+1]; k++)
            {
               nabor_two = S_j[k];
               if (CF_marker[nabor_two] == UNDECIDED)
               {
                  measure = measure_array[nabor_two];
                  hypre_remove_point(&LoL_head, &LoL_tail, measure,
                                     nabor_two, lists, where);

                  new_meas = ++(measure_array[nabor_two]);

                  hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas,
                                       nabor_two, lists, where);
               }
            }
         }
      }
      for (j = S_i[index]; j < S_i[index+1]; j++)
      {
         nabor = S_j[j];
         if (CF_marker[nabor] == UNDECIDED)
         {
            measure = measure_array[nabor];

            hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where);

            measure_array[nabor] = --measure;

            if (measure > 0)
            {
               hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, nabor,
                                    lists, where);
            }
            else
            {
               /* measure dropped to zero: nabor becomes F immediately and
                * its neighbors' measures are bumped, like in the setup */
               CF_marker[nabor] = F_PT;
               --num_left;

               for (k = S_i[nabor]; k < S_i[nabor+1]; k++)
               {
                  nabor_two = S_j[k];
                  if (CF_marker[nabor_two] == UNDECIDED)
                  {
                     new_meas = measure_array[nabor_two];
                     hypre_remove_point(&LoL_head, &LoL_tail, new_meas,
                                        nabor_two, lists, where);

                     new_meas = ++(measure_array[nabor_two]);

                     hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas,
                                          nabor_two, lists, where);
                  }
               }
            }
         }
      }
   }

   hypre_TFree(measure_array, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixDestroy(ST);

   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d    Coarsen 1st pass = %f\n",
                   my_id, wall_time);
   }

   hypre_TFree(lists, HYPRE_MEMORY_HOST);
   hypre_TFree(where, HYPRE_MEMORY_HOST);
   hypre_TFree(LoL_head, HYPRE_MEMORY_HOST);
   hypre_TFree(LoL_tail, HYPRE_MEMORY_HOST);

   /* special coarse points are ordinary C-points from here on */
   for (i=0; i < num_variables; i++)
   {
      if (CF_marker[i] == SC_PT)
      {
         CF_marker[i] = C_PT;
      }
   }

   if (coarsen_type == 11)
   {
      if (meas_type && num_procs > 1)
      {
         hypre_CSRMatrixDestroy(S_ext);
      }
      /* NOTE(review): returns literal 0 here rather than hypre_error_flag,
       * unlike the final return below — confirm this is intentional */
      return 0;
   }

   /* second pass, check fine points for coarse neighbors
      for coarsen_type = 2, the second pass includes
      off-processor boundary points */

   /*---------------------------------------------------
    * Initialize the graph array
    *---------------------------------------------------*/

   graph_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);

   for (i = 0; i < num_variables; i++)
   {
      graph_array[i] = -1;
   }

   if (debug_flag == 3) wall_time = time_getWallclockSeconds();

   if (coarsen_type == 2)
   {
      /*------------------------------------------------
       * Exchange boundary data for CF_marker
       *------------------------------------------------*/

      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                              num_sends), HYPRE_MEMORY_HOST);

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                    CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      ci_array = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_offd; i++)
         ci_array[i] = -1;

      for (i=0; i < num_variables; i++)
      {
         /* tentative promotions only survive while re-examining the same i */
         if (ci_tilde_mark != i) ci_tilde = -1;
         if (ci_tilde_offd_mark != i) ci_tilde_offd = -1;
         if (CF_marker[i] == -1)
         {
            break_var = 1;
            /* stamp i's strong C-neighbors (local in graph_array,
             * off-processor in ci_array) so membership tests are O(1) */
            for (ji = S_i[i]; ji < S_i[i+1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] > 0)
                  graph_array[j] = i;
            }
            for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++)
            {
               j = S_offd_j[ji];
               if (CF_marker_offd[j] > 0)
                  ci_array[j] = i;
            }
            /* each strong F-F pair (i,j) must share a common C-point */
            for (ji = S_i[i]; ji < S_i[i+1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] == -1)
               {
                  set_empty = 1;
                  for (jj = S_i[j]; jj < S_i[j+1]; jj++)
                  {
                     index = S_j[jj];
                     if (graph_array[index] == i)
                     {
                        set_empty = 0;
                        break;
                     }
                  }
                  if (set_empty)
                  {
                     for (jj = S_offd_i[j]; jj < S_offd_i[j+1]; jj++)
                     {
                        index = S_offd_j[jj];
                        if (ci_array[index] == i)
                        {
                           set_empty = 0;
                           break;
                        }
                     }
                  }
                  if (set_empty)
                  {
                     if (C_i_nonempty)
                     {
                        /* second failure for i: make i itself a C-point and
                         * undo the earlier tentative promotion */
                        CF_marker[i] = 1;
                        if (ci_tilde > -1)
                        {
                           CF_marker[ci_tilde] = -1;
                           ci_tilde = -1;
                        }
                        if (ci_tilde_offd > -1)
                        {
                           CF_marker_offd[ci_tilde_offd] = -1;
                           ci_tilde_offd = -1;
                        }
                        C_i_nonempty = 0;
                        break_var = 0;
                        break;
                     }
                     else
                     {
                        /* tentatively promote j to C and re-examine i */
                        ci_tilde = j;
                        ci_tilde_mark = i;
                        CF_marker[j] = 1;
                        C_i_nonempty = 1;
                        i--;
                        break_var = 0;
                        break;
                     }
                  }
               }
            }
            if (break_var)
            {
               /* same test against i's off-processor strong F-neighbors,
                * scanning their external rows (S_ext) */
               for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++)
               {
                  j = S_offd_j[ji];
                  if (CF_marker_offd[j] == -1)
                  {
                     set_empty = 1;
                     for (jj = S_ext_i[j]; jj < S_ext_i[j+1]; jj++)
                     {
                        big_k = S_ext_j[jj];
                        if (big_k > col_0 && big_k < col_n) /* index interior */
                        {
                           if (graph_array[(HYPRE_Int)(big_k-first_col)] == i)
                           {
                              set_empty = 0;
                              break;
                           }
                        }
                        else
                        {
                           jk = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd);
                           if (jk != -1)
                           {
                              if (ci_array[jk] == i)
                              {
                                 set_empty = 0;
                                 break;
                              }
                           }
                        }
                     }
                     if (set_empty)
                     {
                        if (C_i_nonempty)
                        {
                           CF_marker[i] = 1;
                           if (ci_tilde > -1)
                           {
                              CF_marker[ci_tilde] = -1;
                              ci_tilde = -1;
                           }
                           if (ci_tilde_offd > -1)
                           {
                              CF_marker_offd[ci_tilde_offd] = -1;
                              ci_tilde_offd = -1;
                           }
                           C_i_nonempty = 0;
                           break;
                        }
                        else
                        {
                           ci_tilde_offd = j;
                           ci_tilde_offd_mark = i;
                           CF_marker_offd[j] = 1;
                           C_i_nonempty = 1;
                           i--;
                           break;
                        }
                     }
                  }
               }
            }
         }
      }
   }
   else
   {
      /* second pass restricted to local (on-processor) connections */
      for (i=0; i < num_variables; i++)
      {
         if (ci_tilde_mark != i) ci_tilde = -1;
         if (CF_marker[i] == -1)
         {
            for (ji = S_i[i]; ji < S_i[i+1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] > 0)
                  graph_array[j] = i;
            }
            for (ji = S_i[i]; ji < S_i[i+1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] == -1)
               {
                  set_empty = 1;
                  for (jj = S_i[j]; jj < S_i[j+1]; jj++)
                  {
                     index = S_j[jj];
                     if (graph_array[index] == i)
                     {
                        set_empty = 0;
                        break;
                     }
                  }
                  if (set_empty)
                  {
                     if (C_i_nonempty)
                     {
                        CF_marker[i] = 1;
                        if (ci_tilde > -1)
                        {
                           CF_marker[ci_tilde] = -1;
                           ci_tilde = -1;
                        }
                        C_i_nonempty = 0;
                        break;
                     }
                     else
                     {
                        ci_tilde = j;
                        ci_tilde_mark = i;
                        CF_marker[j] = 1;
                        C_i_nonempty = 1;
                        i--;
                        break;
                     }
                  }
               }
            }
         }
      }
   }

   if (debug_flag == 3 && coarsen_type != 2)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d    Coarsen 2nd pass = %f\n",
                   my_id, wall_time);
   }

   /* third pass, check boundary fine points for coarse neighbors */

   if (coarsen_type == 3 || coarsen_type == 4)
   {
      if (debug_flag == 3) wall_time = time_getWallclockSeconds();

      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                   HYPRE_MEMORY_HOST);

      /*------------------------------------------------
       * Exchange boundary data for CF_marker
       *------------------------------------------------*/

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                    CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      ci_array = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_offd; i++)
         ci_array[i] = -1;
   }

   if (coarsen_type > 1 && coarsen_type < 5)
   {
      /* examine the off-processor boundary points themselves, using
       * their external rows S_ext */
      for (i=0; i < num_variables; i++)
         graph_array[i] = -1;
      for (i=0; i < num_cols_offd; i++)
      {
         if (ci_tilde_mark != i) ci_tilde = -1;
         if (ci_tilde_offd_mark != i) ci_tilde_offd = -1;
         if (CF_marker_offd[i] == -1)
         {
            /* stamp the C-neighbors of boundary point i */
            for (ji = S_ext_i[i]; ji < S_ext_i[i+1]; ji++)
            {
               big_k = S_ext_j[ji];
               if (big_k > col_0 && big_k < col_n)
               {
                  j = (HYPRE_Int)(big_k - first_col);
                  if (CF_marker[j] > 0)
                     graph_array[j] = i;
               }
               else
               {
                  jj = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd);
                  if (jj != -1 && CF_marker_offd[jj] > 0)
                     ci_array[jj] = i;
               }
            }
            for (ji = S_ext_i[i]; ji < S_ext_i[i+1]; ji++)
            {
               big_k = S_ext_j[ji];
               if (big_k > col_0 && big_k < col_n)
               {
                  j = (HYPRE_Int)(big_k - first_col);
                  if ( CF_marker[j] == -1)
                  {
                     set_empty = 1;
                     for (jj = S_i[j]; jj < S_i[j+1]; jj++)
                     {
                        index = S_j[jj];
                        if (graph_array[index] == i)
                        {
                           set_empty = 0;
                           break;
                        }
                     }
                     for (jj = S_offd_i[j]; jj < S_offd_i[j+1]; jj++)
                     {
                        index = S_offd_j[jj];
                        if (ci_array[index] == i)
                        {
                           set_empty = 0;
                           break;
                        }
                     }
                     if (set_empty)
                     {
                        if (C_i_nonempty)
                        {
                           CF_marker_offd[i] = 1;
                           if (ci_tilde > -1)
                           {
                              CF_marker[ci_tilde] = -1;
                              ci_tilde = -1;
                           }
                           if (ci_tilde_offd > -1)
                           {
                              CF_marker_offd[ci_tilde_offd] = -1;
                              ci_tilde_offd = -1;
                           }
                           C_i_nonempty = 0;
                           break;
                        }
                        else
                        {
                           ci_tilde = j;
                           ci_tilde_mark = i;
                           CF_marker[j] = 1;
                           C_i_nonempty = 1;
                           i--;
                           break;
                        }
                     }
                  }
               }
               else
               {
                  jm = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd);
                  if (jm != -1 && CF_marker_offd[jm] == -1)
                  {
                     set_empty = 1;
                     for (jj = S_ext_i[jm]; jj < S_ext_i[jm+1]; jj++)
                     {
                        big_k = S_ext_j[jj];
                        if (big_k > col_0 && big_k < col_n)
                        {
                           if (graph_array[(HYPRE_Int)(big_k-first_col)] == i)
                           {
                              set_empty = 0;
                              break;
                           }
                        }
                        else
                        {
                           jk = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd);
                           if (jk != -1)
                           {
                              if (ci_array[jk] == i)
                              {
                                 set_empty = 0;
                                 break;
                              }
                           }
                        }
                     }
                     if (set_empty)
                     {
                        if (C_i_nonempty)
                        {
                           CF_marker_offd[i] = 1;
                           if (ci_tilde > -1)
                           {
                              CF_marker[ci_tilde] = -1;
                              ci_tilde = -1;
                           }
                           if (ci_tilde_offd > -1)
                           {
                              CF_marker_offd[ci_tilde_offd] = -1;
                              ci_tilde_offd = -1;
                           }
                           C_i_nonempty = 0;
                           break;
                        }
                        else
                        {
                           ci_tilde_offd = jm;
                           ci_tilde_offd_mark = i;
                           CF_marker_offd[jm] = 1;
                           C_i_nonempty = 1;
                           i--;
                           break;
                        }
                     }
                  }
               }
            }
         }
      }

      /*------------------------------------------------
       * Send boundary data for CF_marker back
       *------------------------------------------------*/

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd,
                                                    int_buf_data);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      /* only CF_marker entries from larger procs are accepted
         if coarsen_type = 4 coarse points are not overwritten  */

      index = 0;
      if (coarsen_type != 4)
      {
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            if (hypre_ParCSRCommPkgSendProc(comm_pkg,i) > my_id)
            {
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
                  CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] =
                     int_buf_data[index++];
            }
            else
            {
               /* skip this send block's portion of the receive buffer */
               index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - start;
            }
         }
      }
      else
      {
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            if (hypre_ParCSRCommPkgSendProc(comm_pkg,i) > my_id)
            {
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
               {
                  elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
                  if (CF_marker[elmt] != 1)
                     CF_marker[elmt] = int_buf_data[index];
                  index++;
               }
            }
            else
            {
               index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - start;
            }
         }
      }
      if (debug_flag == 3)
      {
         wall_time = time_getWallclockSeconds() - wall_time;
         if (coarsen_type == 4)
            hypre_printf("Proc = %d    Coarsen 3rd pass = %f\n",
                         my_id, wall_time);
         if (coarsen_type == 3)
            hypre_printf("Proc = %d    Coarsen 3rd pass = %f\n",
                         my_id, wall_time);
         if (coarsen_type == 2)
            hypre_printf("Proc = %d    Coarsen 2nd pass = %f\n",
                         my_id, wall_time);
      }
   }
   if (coarsen_type == 5)
   {
      /*------------------------------------------------
       * Exchange boundary data for CF_marker
       *------------------------------------------------*/

      if (debug_flag == 3) wall_time = time_getWallclockSeconds();

      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                              num_sends), HYPRE_MEMORY_HOST);

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                    CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      ci_array = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_offd; i++)
         ci_array[i] = -1;
      for (i=0; i < num_variables; i++)
         graph_array[i] = -1;

      /* special pass: only F-points with off-processor connections are
       * examined; failures are tagged -2 rather than promoted to C */
      for (i=0; i < num_variables; i++)
      {
         if (CF_marker[i] == -1 && (S_offd_i[i+1]-S_offd_i[i]) > 0)
         {
            break_var = 1;
            for (ji = S_i[i]; ji < S_i[i+1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] > 0)
                  graph_array[j] = i;
            }
            for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++)
            {
               j = S_offd_j[ji];
               if (CF_marker_offd[j] > 0)
                  ci_array[j] = i;
            }
            for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++)
            {
               j = S_offd_j[ji];
               if (CF_marker_offd[j] == -1)
               {
                  set_empty = 1;
                  for (jj = S_ext_i[j]; jj < S_ext_i[j+1]; jj++)
                  {
                     big_k = S_ext_j[jj];
                     if (big_k > col_0 && big_k < col_n) /* index interior */
                     {
                        if (graph_array[(HYPRE_Int)(big_k-first_col)] == i)
                        {
                           set_empty = 0;
                           break;
                        }
                     }
                     else
                     {
                        jk = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd);
                        if (jk != -1)
                        {
                           if (ci_array[jk] == i)
                           {
                              set_empty = 0;
                              break;
                           }
                        }
                     }
                  }
                  if (set_empty)
                  {
                     if (C_i_nonempty)
                     {
                        CF_marker[i] = -2;
                        C_i_nonempty = 0;
                        break;
                     }
                     else
                     {
                        C_i_nonempty = 1;
                        i--;
                        break;
                     }
                  }
               }
            }
         }
      }
      if (debug_flag == 3)
      {
         wall_time = time_getWallclockSeconds() - wall_time;
         hypre_printf("Proc = %d    Coarsen special points = %f\n",
                      my_id, wall_time);
      }
   }

   /*---------------------------------------------------
    * Clean up and return
    *---------------------------------------------------*/

   /*if (coarsen_type != 1)
     { */
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(ci_array, HYPRE_MEMORY_HOST);
   /*} */
   hypre_TFree(graph_array, HYPRE_MEMORY_HOST);
   if ((meas_type || (coarsen_type != 1 && coarsen_type != 11)) && num_procs > 1)
   {
      hypre_CSRMatrixDestroy(S_ext);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenFalgout
 *
 * Falgout coarsening: run Ruge-Stueben coarsening first (coarsen_type 6,
 * which marks its F-points as Z_PT so they remain revisable), then run
 * CLJP coarsening (CF_init = 1) on the same CF_marker to settle the
 * splitting near processor boundaries.
 *
 * Arguments mirror hypre_BoomerAMGCoarsenRuge; CF_marker_ptr is in/out.
 * Returns the accumulated error codes of the two stages.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCoarsenFalgout( hypre_ParCSRMatrix *S,
                               hypre_ParCSRMatrix *A,
                               HYPRE_Int measure_type,
                               HYPRE_Int cut_factor,
                               HYPRE_Int debug_flag,
                               HYPRE_Int **CF_marker_ptr)
{
   HYPRE_Int err_ruge;
   HYPRE_Int err_cljp;

   /* Stage 1: Ruge coarsening, coarsen_type 6 (Falgout first stage) */
   err_ruge = hypre_BoomerAMGCoarsenRuge(S, A, measure_type, 6, cut_factor,
                                         debug_flag, CF_marker_ptr);

   /* Stage 2: CLJP sweep starting from the stage-1 splitting */
   err_cljp = hypre_BoomerAMGCoarsen(S, A, 1, debug_flag, CF_marker_ptr);

   return err_ruge + err_cljp;
}
/*--------------------------------------------------------------------------*/
#define C_PT 1
#define F_PT -1
#define SF_PT -3
#define COMMON_C_PT 2
#define Z_PT -2
/* begin HANS added */
/**************************************************************
*
* Modified Independent Set Coarsening routine
* (don't worry about strong F-F connections
* without a common C point)
*
**************************************************************/
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenPMISHost
 *
 * Host (CPU) implementation of PMIS coarsening: repeatedly picks a
 * parallel modified independent set of maximal-measure points, marks the
 * set as C-points, their strong neighbors as F-points, and shrinks the
 * working graph until every point is assigned.
 *
 * Parameters:
 *   S             - strength-of-connection matrix (strong columns kept in S_j)
 *   A             - system matrix (used for its comm package if S has none)
 *   CF_init       - initialization mode (1: CF_marker pre-seeded by a prior
 *                   R-S pass as in HMIS; 2/4: alternate IndepSet init; 3/4:
 *                   isolated points become C instead of SF)
 *   debug_flag    - 3 enables per-phase timing output
 *   CF_marker_ptr - in/out C/F marker array (allocated here if NULL);
 *                   positive = C point, negative = F point
 *
 * Returns an error counter (0 on success).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCoarsenPMISHost( hypre_ParCSRMatrix    *S,
                                hypre_ParCSRMatrix    *A,
                                HYPRE_Int              CF_init,
                                HYPRE_Int              debug_flag,
                                HYPRE_Int            **CF_marker_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PMIS] -= hypre_MPI_Wtime();
#endif
   MPI_Comm                  comm            = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg      *comm_pkg        = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle   *comm_handle;
   hypre_CSRMatrix          *S_diag          = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int                *S_diag_i        = hypre_CSRMatrixI(S_diag);
   HYPRE_Int                *S_diag_j        = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix          *S_offd          = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int                *S_offd_i        = hypre_CSRMatrixI(S_offd);
   HYPRE_Int                *S_offd_j;
   HYPRE_Int                 num_variables   = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int                 num_cols_offd   = 0;
   /* hypre_CSRMatrix       *S_ext;
      HYPRE_Int             *S_ext_i;
      HYPRE_Int             *S_ext_j; */
   HYPRE_Int                 num_sends       = 0;
   HYPRE_Int                *int_buf_data;
   HYPRE_Real               *buf_data;
   HYPRE_Int                *CF_marker;
   HYPRE_Int                *CF_marker_offd;
   HYPRE_Real               *measure_array;
   HYPRE_Int                *graph_array;       /* indices of still-unassigned local points */
   HYPRE_Int                *graph_array_offd;  /* indices of still-unassigned external points */
   HYPRE_Int                 graph_size;
   HYPRE_BigInt              big_graph_size;
   HYPRE_Int                 graph_offd_size;
   HYPRE_BigInt              global_graph_size;
   HYPRE_Int                 i, j, jj, jS, ig;
   HYPRE_Int                 index, start, my_id, num_procs, jrow, cnt, elmt;
   HYPRE_Int                 nnzrow;
   HYPRE_Int                 ierr = 0;
   HYPRE_Real                wall_time;
   HYPRE_Int                 iter = 0;
   HYPRE_Int                *prefix_sum_workspace;
#if 0 /* debugging */
   /* NOTE: this compiled-out block redeclares `iter`; harmless while it stays #if 0'd */
   char  filename[256];
   FILE *fp;
   HYPRE_Int   iter = 0;
#endif
   /*******************************************************************************
     BEFORE THE INDEPENDENT SET COARSENING LOOP:
       measure_array: calculate the measures, and communicate them
         (this array contains measures for both local and external nodes)
       CF_marker, CF_marker_offd: initialize CF_marker
         (separate arrays for local and external; 0=unassigned, negative=F point, positive=C point)
   ******************************************************************************/
   /*--------------------------------------------------------------
    * Use the ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: S_data is not used; instead, only strong columns are retained
    *       in S_j, which can then be used like S_data
    *----------------------------------------------------------------*/
   /*S_ext = NULL; */
   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds();
   }
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   /* Fall back to A's communication package if S carries none. */
   if (!comm_pkg)
   {
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* CTAlloc zero-fills, so these buffers start out as all zeros. */
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
   buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   S_diag_j = hypre_CSRMatrixJ(S_diag);
   if (num_cols_offd)
   {
      S_offd_j = hypre_CSRMatrixJ(S_offd);
   }
   /*----------------------------------------------------------
    * Compute the measures
    *
    * The measures are currently given by the column sums of S.
    * Hence, measure_array[i] is the number of influences
    * of variable i.
    *
    * The measures are augmented by a random number
    * between 0 and 1.
    *----------------------------------------------------------*/
   measure_array = hypre_CTAlloc(HYPRE_Real, num_variables + num_cols_offd, HYPRE_MEMORY_HOST);
   /* first calculate the local part of the sums for the external nodes */
#ifdef HYPRE_USING_OPENMP
   /* Integer scratch array so the column counts can be accumulated with
      "#pragma omp atomic" (atomic updates on HYPRE_Real would be costlier). */
   HYPRE_Int *measure_array_temp = hypre_CTAlloc(HYPRE_Int, num_variables + num_cols_offd, HYPRE_MEMORY_HOST);
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i = 0; i < S_offd_i[num_variables]; i++)
   {
      #pragma omp atomic
      measure_array_temp[num_variables + S_offd_j[i]]++;
   }
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i = 0; i < num_cols_offd; i++)
   {
      measure_array[i + num_variables] = measure_array_temp[i + num_variables];
   }
#else
   for (i = 0; i < S_offd_i[num_variables]; i++)
   {
      measure_array[num_variables + S_offd_j[i]] += 1.0;
   }
#endif // HYPRE_USING_OPENMP
   /* now send those locally calculated values for the external nodes to the neighboring processors */
   /* NOTE(review): job code 2 appears to be a "reverse" Real exchange
      (off-processor entries summed back onto their owners) -- confirm
      against the hypre_ParCSRCommHandleCreate documentation. */
   if (num_procs > 1)
   {
      comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg, &measure_array[num_variables], buf_data);
   }
   /* calculate the local part for the local nodes (overlapped with the
      communication started above) */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i = 0; i < S_diag_i[num_variables]; i++)
   {
      #pragma omp atomic
      measure_array_temp[S_diag_j[i]]++;
   }
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i = 0; i < num_variables; i++)
   {
      measure_array[i] = measure_array_temp[i];
   }
   hypre_TFree(measure_array_temp, HYPRE_MEMORY_HOST);
#else
   for (i = 0; i < S_diag_i[num_variables]; i++)
   {
      measure_array[S_diag_j[i]] += 1.0;
   }
#endif // HYPRE_USING_OPENMP
   /* finish the communication */
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   /* now add the externally calculated part of the local nodes to the local nodes */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] += buf_data[index++];
      }
   }
   /* set the measures of the external nodes to zero */
   for (i = num_variables; i < num_variables + num_cols_offd; i++)
   {
      measure_array[i] = 0;
   }
   /* this augments the measures with a random number between 0 and 1 */
   /* (only for the local part) */
   /* this augments the measures */
   if (CF_init == 2 || CF_init == 4)
   {
      hypre_BoomerAMGIndepSetInit(S, measure_array, 1);
   }
   else
   {
      hypre_BoomerAMGIndepSetInit(S, measure_array, 0);
   }
   /*---------------------------------------------------
    * Initialize the graph arrays, and CF_marker arrays
    *---------------------------------------------------*/
   /* first the off-diagonal part of the graph array */
   if (num_cols_offd)
   {
      graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
   }
   else
   {
      graph_array_offd = NULL;
   }
   for (ig = 0; ig < num_cols_offd; ig++)
   {
      graph_array_offd[ig] = ig;
   }
   graph_offd_size = num_cols_offd;
   /* now the local part of the graph array, and the local CF_marker array */
   graph_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   /* Allocate CF_marker if not done before */
   if (*CF_marker_ptr == NULL)
   {
      *CF_marker_ptr = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   }
   CF_marker = *CF_marker_ptr;
   if (CF_init == 1)
   {
      /* HMIS mode: CF_marker arrives pre-seeded from a Ruge-Stueben first
         pass; only re-open points that still need a decision here. */
      cnt = 0;
      for (i = 0; i < num_variables; i++)
      {
         if ( CF_marker[i] != SF_PT )
         {
            if ( S_offd_i[i+1] - S_offd_i[i] > 0 || CF_marker[i] == -1 )
            {
               CF_marker[i] = 0;
            }
            if ( CF_marker[i] == Z_PT)
            {
               if ( measure_array[i] >= 1.0 || S_diag_i[i+1] - S_diag_i[i] > 0 )
               {
                  CF_marker[i] = 0;
                  graph_array[cnt++] = i;
               }
               else
               {
                  CF_marker[i] = F_PT;
               }
            }
            else
            {
               graph_array[cnt++] = i;
            }
         }
         else
         {
            measure_array[i] = 0;
         }
      }
   }
   else
   {
      cnt = 0;
      for (i = 0; i < num_variables; i++)
      {
         CF_marker[i] = 0;
         nnzrow = (S_diag_i[i+1] - S_diag_i[i]) + (S_offd_i[i+1] - S_offd_i[i]);
         if (nnzrow == 0)
         {
            CF_marker[i] = SF_PT; /* an isolated fine grid */
            if (CF_init == 3 || CF_init == 4)
            {
               CF_marker[i] = C_PT;
            }
            measure_array[i] = 0;
         }
         else
         {
            graph_array[cnt++] = i;
         }
      }
   }
   graph_size = cnt;
   /* now the off-diagonal part of CF_marker */
   if (num_cols_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
   }
   else
   {
      CF_marker_offd = NULL;
   }
   for (i = 0; i < num_cols_offd; i++)
   {
      CF_marker_offd[i] = 0;
   }
   /*------------------------------------------------
    * Communicate the local measures, which are complete,
      to the external nodes
    *------------------------------------------------*/
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
         buf_data[index++] = measure_array[jrow];
      }
   }
   if (num_procs > 1)
   {
      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, &measure_array[num_variables]);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d    Initialize CLJP phase = %f\n", my_id, wall_time);
   }
   /* graph_array2: double buffers used to compact the unassigned-point
      lists in parallel at the end of each sweep (swapped with the primary
      arrays every iteration) */
   HYPRE_Int *graph_array2 = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   HYPRE_Int *graph_array_offd2 = NULL;
   if (num_cols_offd)
   {
      graph_array_offd2 = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
   }
   /*******************************************************************************
     THE INDEPENDENT SET COARSENING LOOP:
   ******************************************************************************/
   /*---------------------------------------------------
    * Loop until all points are either fine or coarse.
    *---------------------------------------------------*/
   while (1)
   {
      big_graph_size = (HYPRE_BigInt) graph_size;
      /* stop the coarsening if nothing left to be coarsened */
      hypre_MPI_Allreduce(&big_graph_size, &global_graph_size, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
      /* if (my_id == 0) { hypre_printf("graph size %b\n", global_graph_size); } */
      if (global_graph_size == 0)
      {
         break;
      }
      /*
         hypre_printf("\n");
         hypre_printf("*** MIS iteration %d\n",iter);
         hypre_printf("graph_size remaining %d\n",graph_size);
      */
      /*-----------------------------------------------------------------------------------------
       * Pick an independent set of points with maximal measure
       * At the end, CF_marker is complete, but still needs to be communicated to CF_marker_offd
       * for CF_init == 1, as in HMIS, the first IS was fed from prior R-S coarsening
       *----------------------------------------------------------------------------------------*/
      if (!CF_init || iter)
      {
         /*
            hypre_BoomerAMGIndepSet(S, measure_array, graph_array, graph_size,
                                    graph_array_offd, graph_offd_size, CF_marker, CF_marker_offd);
         */
         /* Tentatively admit every remaining point with measure > 1 ... */
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE
#endif
         for (ig = 0; ig < graph_size; ig++)
         {
            i = graph_array[ig];
            if (measure_array[i] > 1)
            {
               CF_marker[i] = 1;
            }
         }
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE
#endif
         for (ig = 0; ig < graph_offd_size; ig++)
         {
            i = graph_array_offd[ig];
            if (measure_array[i+num_variables] > 1)
            {
               CF_marker_offd[i] = 1;
            }
         }
         /*-------------------------------------------------------
          * Remove nodes from the initial independent set
          * (for each strongly connected pair, the smaller measure
          * loses; ties leave both in, which is safe because the
          * random augmentation makes ties rare)
          *-------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(ig, i, jS, j, jj) HYPRE_SMP_SCHEDULE
#endif
         for (ig = 0; ig < graph_size; ig++)
         {
            i = graph_array[ig];
            if (measure_array[i] > 1)
            {
               /* for each local neighbor j of i */
               for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
               {
                  j = S_diag_j[jS];
                  if (measure_array[j] > 1)
                  {
                     if (measure_array[i] > measure_array[j])
                     {
                        CF_marker[j] = 0;
                     }
                     else if (measure_array[j] > measure_array[i])
                     {
                        CF_marker[i] = 0;
                     }
                  }
               }
               /* for each offd neighbor j of i */
               for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
               {
                  jj = S_offd_j[jS];
                  j = num_variables + jj;
                  if (measure_array[j] > 1)
                  {
                     if (measure_array[i] > measure_array[j])
                     {
                        CF_marker_offd[jj] = 0;
                     }
                     else if (measure_array[j] > measure_array[i])
                     {
                        CF_marker[i] = 0;
                     }
                  }
               }
            } /* for each node with measure > 1 */
         } /* for each node i */
         /*------------------------------------------------------------------------------
          * Exchange boundary data for CF_marker: send external CF to internal CF
          *------------------------------------------------------------------------------*/
         /* NOTE(review): job code 12 appears to be the reverse Int exchange
            (gathers each neighbor's CF_marker_offd opinion of our boundary
            points into int_buf_data) -- confirm against hypre comm docs. */
         if (num_procs > 1)
         {
            comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd, int_buf_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
         }
         index = 0;
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            {
               elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
               /* If a neighbor removed this boundary point (received 0) while
                  we tentatively kept it, demote it locally; the buffer slot
                  keeps its 0 so the removal is echoed back below. */
               if (!int_buf_data[index] && CF_marker[elmt] > 0)
               {
                  CF_marker[elmt] = 0;
                  index++;
               }
               else
               {
                  int_buf_data[index++] = CF_marker[elmt];
               }
            }
         }
         if (num_procs > 1)
         {
            comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd);
            hypre_ParCSRCommHandleDestroy(comm_handle);
         }
      } /* if (!CF_init || iter) */
      iter++;
      /*------------------------------------------------
       * Set C-pts and F-pts.
       *------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(ig, i, jS, j) HYPRE_SMP_SCHEDULE
#endif
      for (ig = 0; ig < graph_size; ig++)
      {
         i = graph_array[ig];
         /*---------------------------------------------
          * If the measure of i is smaller than 1, then
          * make i an F point (because it does not influence
          * any other point)
          *---------------------------------------------*/
         if (measure_array[i] < 1)
         {
            CF_marker[i]= F_PT;
         }
         /*---------------------------------------------
          * First treat the case where point i is in the
          * independent set: make i a C point,
          *---------------------------------------------*/
         if (CF_marker[i] > 0)
         {
            CF_marker[i] = C_PT;
         }
         /*---------------------------------------------
          * Now treat the case where point i is not in the
          * independent set: loop over
          * all the points j that influence equation i; if
          * j is a C point, then make i an F point.
          *---------------------------------------------*/
         else
         {
            /* first the local part */
            for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
            {
               /* j is the column number, or the local number of the point influencing i */
               j = S_diag_j[jS];
               if (CF_marker[j] > 0) /* j is a C-point */
               {
                  CF_marker[i] = F_PT;
               }
            }
            /* now the external part */
            for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
            {
               j = S_offd_j[jS];
               if (CF_marker_offd[j] > 0) /* j is a C-point */
               {
                  CF_marker[i] = F_PT;
               }
            }
         } /* end else */
      } /* end first loop over graph */
      /* now communicate CF_marker to CF_marker_offd, to make
         sure that new external F points are known on this processor */
      /*------------------------------------------------------------------------------
       * Exchange boundary data for CF_marker: send internal points to external points
       *------------------------------------------------------------------------------*/
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }
      /*------------------------------------------------
       * Update subgraph: compact the lists of still-unassigned
       * points into the double buffers via a threaded prefix sum,
       * then swap the buffers.
       *------------------------------------------------*/
      /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/
      prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel private(ig,i)
#endif
      {
         HYPRE_Int private_graph_size_cnt = 0;
         HYPRE_Int private_graph_offd_size_cnt = 0;
         HYPRE_Int ig_begin, ig_end;
         hypre_GetSimpleThreadPartition(&ig_begin, &ig_end, graph_size);
         HYPRE_Int ig_offd_begin, ig_offd_end;
         hypre_GetSimpleThreadPartition(&ig_offd_begin, &ig_offd_end, graph_offd_size);
         /* pass 1: count survivors in this thread's slice */
         for (ig = ig_begin; ig < ig_end; ig++)
         {
            i = graph_array[ig];
            if (CF_marker[i] != 0) /* C or F point */
            {
               /* the independent set subroutine needs measure 0 for removed nodes */
               measure_array[i] = 0;
            }
            else
            {
               private_graph_size_cnt++;
            }
         }
         for (ig = ig_offd_begin; ig < ig_offd_end; ig++)
         {
            i = graph_array_offd[ig];
            if (CF_marker_offd[i] != 0) /* C or F point */
            {
               /* the independent set subroutine needs measure 0 for removed nodes */
               measure_array[i + num_variables] = 0;
            }
            else
            {
               private_graph_offd_size_cnt++;
            }
         }
         /* exclusive scan over per-thread counts; also yields new totals */
         hypre_prefix_sum_pair(&private_graph_size_cnt, &graph_size, &private_graph_offd_size_cnt, &graph_offd_size, prefix_sum_workspace);
         /* pass 2: scatter survivors to their compacted positions */
         for (ig = ig_begin; ig < ig_end; ig++)
         {
            i = graph_array[ig];
            if (CF_marker[i] == 0)
            {
               graph_array2[private_graph_size_cnt++] = i;
            }
         }
         for (ig = ig_offd_begin; ig < ig_offd_end; ig++)
         {
            i = graph_array_offd[ig];
            if (CF_marker_offd[i] == 0)
            {
               graph_array_offd2[private_graph_offd_size_cnt++] = i;
            }
         }
      } /* omp parallel */
      HYPRE_Int *temp = graph_array;
      graph_array = graph_array2;
      graph_array2 = temp;
      temp = graph_array_offd;
      graph_array_offd = graph_array_offd2;
      graph_array_offd2 = temp;
      hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   } /* end while */
   /*
      hypre_printf("*** MIS iteration %d\n",iter);
      hypre_printf("graph_size remaining %d\n",graph_size);
      hypre_printf("num_cols_offd %d\n",num_cols_offd);
      for (i=0;i<num_variables;i++)
      {
         if(CF_marker[i] == 1)
         {
            hypre_printf("node %d CF %d\n",i,CF_marker[i]);
         }
      }
   */
   /*---------------------------------------------------
    * Clean up and return
    *---------------------------------------------------*/
   hypre_TFree(measure_array, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array2, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array_offd2, HYPRE_MEMORY_HOST);
   if (num_cols_offd)
   {
      hypre_TFree(graph_array_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   /*if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);*/
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PMIS] += hypre_MPI_Wtime();
#endif
   return (ierr);
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenPMIS
 *
 * Dispatch wrapper for PMIS coarsening: in CUDA builds it inspects the
 * memory location of A's diagonal block and routes to the device or host
 * implementation (with an NVTX range around the whole call); otherwise it
 * always runs the host implementation. Returns the callee's error code.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCoarsenPMIS( hypre_ParCSRMatrix    *S,
                            hypre_ParCSRMatrix    *A,
                            HYPRE_Int              CF_init,
                            HYPRE_Int              debug_flag,
                            HYPRE_Int            **CF_marker_ptr)
{
   HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPushRange("PMIS");
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGCoarsenPMISDevice( S, A, CF_init, debug_flag, CF_marker_ptr );
   }
   else
   {
      ierr = hypre_BoomerAMGCoarsenPMISHost( S, A, CF_init, debug_flag, CF_marker_ptr );
   }
   hypre_NvtxPopRange();
#else
   /* CPU-only build: no execution-policy query needed. */
   ierr = hypre_BoomerAMGCoarsenPMISHost( S, A, CF_init, debug_flag, CF_marker_ptr );
#endif
   return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenHMIS
 *
 * HMIS coarsening: a Ruge-Stueben first pass (coarsen type 10) seeds the
 * CF marker, then PMIS (CF_init = 1) finishes the selection on the points
 * the first pass left open. Error codes from both passes are accumulated.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCoarsenHMIS( hypre_ParCSRMatrix    *S,
                            hypre_ParCSRMatrix    *A,
                            HYPRE_Int              measure_type,
                            HYPRE_Int              cut_factor,
                            HYPRE_Int              debug_flag,
                            HYPRE_Int            **CF_marker_ptr)
{
   /* Pass 1: Ruge coarsening seeds CF_marker. */
   HYPRE_Int ierr = hypre_BoomerAMGCoarsenRuge(S, A, measure_type, 10, cut_factor,
                                               debug_flag, CF_marker_ptr);
   /* Pass 2: PMIS on the remaining unassigned points. */
   ierr += hypre_BoomerAMGCoarsenPMISHost(S, A, 1, debug_flag, CF_marker_ptr);
   return (ierr);
}
|
rose_jacobi_sve.c | #include "rex_kmp.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <arm_sve.h>
#include <arm_sve.h>
#define REAL float
/* Wall-clock time in milliseconds since the epoch (ftime-based,
   ~1 ms resolution). Used only for elapsed-time differences. */
static double read_timer_ms()
{
    struct timeb now;
    ftime(&now);
    double ms = (double) now.time * 1000.0;
    ms += (double) now.millitm;
    return ms;
}
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successive over-relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define DEFAULT_DIMSIZE 256
/* Print an n x m row-major matrix A to stdout, one row per line,
   each entry labeled "name[i][j]:value". */
void print_array(char *title,char *name,float *A,int n,int m)
{
    printf("%s:\n",title);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < m; col++) {
            printf("%s[%d][%d]:%f ",name,row,col,A[row * m + col]);
        }
        printf("\n");
    }
    printf("\n");
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
 * Sets the grid spacings *dx,*dy for the [-1,1]x[-1,1] domain, zeroes the
 * initial guess u, and builds the RHS f from the exact solution
 * u(x,y) = (1-x^2)(1-y^2).
 * NOTE: xx/yy are truncated to int, exactly as in the original benchmark. */
void initialize(int n,int m,float alpha,float *dx,float *dy,float *u_p,float *f_p)
{
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);
    //double PI=3.1415926;
    *dx = (2.0 / (n - 1));
    *dy = (2.0 / (m - 1));
    /* Initialize initial condition and RHS */
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            int xx = ((int )(- 1.0 + ( *dx * (i - 1))));
            int yy = ((int )(- 1.0 + ( *dy * (j - 1))));
            u[i][j] = 0.0;
            f[i][j] = (- 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)));
        }
    }
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
 * Computes sqrt(sum of squared deviations)/(n*m) between the numerical
 * solution u and the exact solution (1-x^2)(1-y^2), and prints it.
 * (alpha and f are unused here; kept for signature compatibility.) */
void error_check(int n,int m,float alpha,float dx,float dy,float *u_p,float *f_p)
{
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);
    float error = 0.0;
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            float xx = (- 1.0 + (dx * (i - 1)));
            float yy = (- 1.0 + (dy * (j - 1)));
            float temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy)));
            error = error + temp * temp;
        }
    }
    error = (sqrt(error) / (n * m));
    printf("Solution Error: %2.6g\n",error);
}
void jacobi_seq(int n,int m,float dx,float dy,float alpha,float relax,float *u_p,float *f_p,float tol,int mits);
void jacobi_omp(int n,int m,float dx,float dy,float alpha,float relax,float *u_p,float *f_p,float tol,int mits);
/* Driver: parses up to six positional arguments (n m alpha tol relax mits),
 * runs the sequential and the OpenMP/SVE Jacobi solvers on identical initial
 * data, reports timing/MFLOPS for each, and checks the sequential solution
 * against the exact one. Returns 0. */
int main(int argc,char *argv[])
{
    int status = 0;
    /* Problem defaults. */
    int n = 256;                  /* grid points in x */
    int m = 256;                  /* grid points in y */
    float alpha = 0.0543;         /* Helmholtz constant */
    float tol = 0.0000000001;     /* iterative-solver tolerance */
    float relax = 1.0;            /* over-relaxation parameter */
    int mits = 5000;              /* max iterations */
    /* Positional arguments are honored only for 1..6 of them (argc 2..7);
       any other count leaves every default in place, as before. */
    if (argc >= 2 && argc <= 7) {
        sscanf(argv[1],"%d",&n);
        if (argc == 2) m = n;               /* m defaults to n when omitted */
        if (argc >= 3) sscanf(argv[2],"%d",&m);
        if (argc >= 4) sscanf(argv[3],"%g",&alpha);
        if (argc >= 5) sscanf(argv[4],"%g",&tol);
        if (argc >= 6) sscanf(argv[5],"%g",&relax);
        if (argc >= 7) sscanf(argv[6],"%d",&mits);
    }
    printf("jacobi %d %d %g %g %g %d\n",n,m,alpha,tol,relax,mits);
    printf("------------------------------------------------------------------------------------------------------\n");
    /** init the array */
    float *u = (float *)(malloc(sizeof(float ) * n * m));
    float *uomp = (float *)(malloc(sizeof(float ) * n * m));
    float *f = (float *)(malloc(sizeof(float ) * n * m));
    float dx;   /* grid spacing in x direction */
    float dy;   /* grid spacing in y direction */
    initialize(n,m,alpha,&dx,&dy,u,f);
    /* Both solvers start from the same initial guess. */
    memcpy(uomp,u,sizeof(float ) * n * m);
    /* Sequential reference run. */
    double elapsed = read_timer_ms();
    jacobi_seq(n,m,dx,dy,alpha,relax,u,f,tol,mits);
    elapsed = read_timer_ms() - elapsed;
    printf("seq elasped time(ms): %4f\n",elapsed);
    double mflops = 0.001 * mits * (n - 2) * (m - 2) * 13 / elapsed;
    printf("MFLOPS: %12.6g\n",mflops);
    puts("================");
    /* OpenMP/SVE run on the copy. */
    elapsed = read_timer_ms();
    jacobi_omp(n,m,dx,dy,alpha,relax,uomp,f,tol,mits);
    elapsed = read_timer_ms() - elapsed;
    printf("OpenMP elasped time(ms): %4f\n",elapsed);
    mflops = 0.001 * mits * (n - 2) * (m - 2) * 13 / elapsed;
    printf("MFLOPS: %12.6g\n",mflops);
    //print_array("Sequential Run", "u",(REAL*)u, n, m);
    error_check(n,m,alpha,dx,dy,u,f);
    free(u);
    free(f);
    free(uomp);
    return 0;
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* mits Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi_seq(int n,int m,float dx,float dy,float alpha,float omega,float *u_p,float *f_p,float tol,int mits)
{
int i;
int j;
int k;
float error;
float ax;
float ay;
float b;
float resid;
float uold[n][m];
float (*u)[m] = ((float (*)[m])u_p);
float (*f)[m] = ((float (*)[m])f_p);
/*
* Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
error = (10.0 * tol);
k = 1;
while(k <= mits && error > tol){
error = 0.0;
/* Copy new solution into old */
for (i = 0; i < n; i++)
for (j = 0; j < m; j++)
uold[i][j] = u[i][j];
for (i = 1; i < n - 1; i++)
for (j = 1; j < m - 1; j++) {
resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b;
//printf("i: %d, j: %d, resid: %f\n", i, j, resid);
u[i][j] = uold[i][j] - omega * resid;
error = error + resid * resid;
}
/* Error check */
//if (k % 500 == 0)
// printf("Finished %d iteration with error: %g\n", k, error);
error = (sqrt(error) / (n * m));
k = k + 1;
/* End iteration loop */
}
printf("Total Number of Iterations: %d\n",k);
printf("Residual: %.15g\n",error);
}
/* ARM-SVE vectorized Jacobi sweep, auto-generated (ROSE/REX translation of
 * the OpenMP-SIMD version of jacobi_seq). Same contract as jacobi_seq:
 * iterates up to mits sweeps or until the residual norm drops to tol, with
 * u_p (n x m, row-major) updated in place and f_p as the RHS.
 *
 * NOTE(review): this generated kernel looks suspect and should be validated
 * against jacobi_seq before trusting its output:
 *   - `resid` is read before it is ever assigned (svdup_f32(resid) below),
 *     i.e. the per-lane update uses an indeterminate scalar;
 *   - `error` and `resid` are broadcast into vectors ONCE per row, outside
 *     the j-loop, so the vector "reductions" fold stale scalar values rather
 *     than the per-lane residuals computed in the loop. */
void jacobi_omp(int n,int m,float dx,float dy,float alpha,float omega,float *u_p,float *f_p,float tol,int mits)
{
int i;
int j;
int k;
float error;
float ax;
float ay;
float b;
float resid;  /* NOTE(review): never initialized before first use below */
float *tmp = (float *)(malloc(sizeof(float ) * n * m));
float (*uold)[m] = ((float (*)[m])tmp);
float (*u)[m] = ((float (*)[m])u_p);
float (*f)[m] = ((float (*)[m])f_p);
/*
  * Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
error = (10.0 * tol);
k = 1;
while(k <= mits && error > tol){
error = 0.0;
//printf("===================== iteration %d ===========================\n", k);
/* Copy new solution into old */
/* Vectorized row copy: predicate covers the tail partial vector. */
for (i = 0; i < n; i++) {
svbool_t __pg1 = svwhilelt_b32(0,m - 1);
for (j = 0; j <= m - 1; j += svcntw()) {
float *__ptr39 = uold[i];
float *__ptr40 = u[i];
svfloat32_t __vec41 = svld1(__pg1,&__ptr40[j]);
svst1(__pg1,&__ptr39[j],__vec41);
__pg1 = svwhilelt_b32(j,m - 1);
}
}
/* Vectorized interior sweep over each row i. */
for (i = 1; i < n - 1; i++) {
svbool_t __pg0 = svwhilelt_b32(0,m - 1 - 1);
/* Broadcast scalar coefficients across all lanes. */
svfloat32_t __vec0 = svdup_f32(ax);
svfloat32_t __vec7 = svdup_f32(ay);
svfloat32_t __vec15 = svdup_f32(b);
svfloat32_t __vec23 = svdup_f32(b);
svfloat32_t __part25 = svdup_f32(0.00000L);
svfloat32_t __vec29 = svdup_f32(omega);
/* NOTE(review): `resid` is indeterminate here (never assigned yet),
   and `error`/`resid` are captured once per row, not per iteration. */
svfloat32_t __vec30 = svdup_f32(resid);
svfloat32_t __vec33 = svdup_f32(error);
svfloat32_t __vec34 = svdup_f32(resid);
svfloat32_t __vec35 = svdup_f32(resid);
svfloat32_t __part38 = svdup_f32(0.00000L);
for (j = 1; j <= m - 1 - 1; j += svcntw()) {
/* 5-point stencil: ax*(up+down) + ay*(left+right) + b*center - f, / b */
float *__ptr1 = uold[i - 1];
svfloat32_t __vec2 = svld1(__pg0,&__ptr1[j]);
float *__ptr3 = uold[i + 1];
svfloat32_t __vec4 = svld1(__pg0,&__ptr3[j]);
svfloat32_t __vec5 = svadd_f32_m(__pg0,__vec4,__vec2);
svfloat32_t __vec6 = svmul_f32_m(__pg0,__vec5,__vec0);
float *__ptr8 = uold[i];
svfloat32_t __vec9 = svld1(__pg0,&__ptr8[j - 1]);
float *__ptr10 = uold[i];
svfloat32_t __vec11 = svld1(__pg0,&__ptr10[j + 1]);
svfloat32_t __vec12 = svadd_f32_m(__pg0,__vec11,__vec9);
svfloat32_t __vec13 = svmul_f32_m(__pg0,__vec12,__vec7);
svfloat32_t __vec14 = svadd_f32_m(__pg0,__vec13,__vec6);
float *__ptr16 = uold[i];
svfloat32_t __vec17 = svld1(__pg0,&__ptr16[j]);
svfloat32_t __vec18 = svmul_f32_m(__pg0,__vec17,__vec15);
svfloat32_t __vec19 = svadd_f32_m(__pg0,__vec18,__vec14);
float *__ptr20 = f[i];
svfloat32_t __vec21 = svld1(__pg0,&__ptr20[j]);
svfloat32_t __vec22 = svsub_f32_m(__pg0,__vec21,__vec19);
svfloat32_t __vec24 = svdiv_f32_m(__pg0,__vec23,__vec22);
__part25 = svadd_f32_m(__pg0,__part25,__vec24);
/* u = uold - omega * resid (NOTE(review): uses the stale __vec30) */
float *__ptr26 = u[i];
float *__ptr27 = uold[i];
svfloat32_t __vec28 = svld1(__pg0,&__ptr27[j]);
svfloat32_t __vec31 = svmul_f32_m(__pg0,__vec30,__vec29);
svfloat32_t __vec32 = svsub_f32_m(__pg0,__vec31,__vec28);
svst1(__pg0,&__ptr26[j],__vec32);
svfloat32_t __vec36 = svmul_f32_m(__pg0,__vec35,__vec34);
svfloat32_t __vec37 = svadd_f32_m(__pg0,__vec36,__vec33);
__part38 = svadd_f32_m(__pg0,__part38,__vec37);
__pg0 = svwhilelt_b32(j,m - 1 - 1);
}
/* Horizontal reductions of the per-lane partial sums (via a VLA buffer
   sized by the runtime vector length). */
float __buf1[(svcntw())];
__pg0 = svwhilelt_b32((uint64_t )0,(svcntw()));
svst1(__pg0,&__buf1,__part38);
for (int __i = 0; __i < svcntw(); ++__i)
error += __buf1[__i];
float __buf0[(svcntw())];
__pg0 = svwhilelt_b32((uint64_t )0,(svcntw()));
svst1(__pg0,&__buf0,__part25);
for (int __i = 0; __i < svcntw(); ++__i)
resid += __buf0[__i];
}
/* Error check */
//if (k % 500 == 0)
//    printf("Finished %d iteration with error: %g\n", k, error);
error = (sqrt(error) / (n * m));
k = k + 1;
/* End iteration loop */
}
printf("Total Number of Iterations: %d\n",k);
printf("Residual: %.15g\n",error);
free(tmp);
}
|
H2Pack_matvec.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "H2Pack_config.h"
#include "H2Pack_typedef.h"
#include "H2Pack_aux_structs.h"
#include "H2Pack_matvec.h"
#include "H2Pack_utils.h"
#include "utils.h"
// Calculate GEMV A * x0 and A^T * x1 in one run to reduce bandwidth pressure
// Input parameters:
// nrow : Number of rows in the matrix
// ncol : Number of columns in the matrix
// mat : Matrix, size >= nrow * ldm
// ldm : Leading dimension of the matrix, >= ncol
// x_in_0 : Input vector 0
// x_in_1 : Input vector 1
// Output parameter:
// x_out_0 : Output vector 0, := mat * x_in_0
// x_out_1 : Output vector 1, := mat^T * x_in_1
// Calculate GEMV A * x0 and A^T * x1 in one run to reduce bandwidth pressure.
// Both outputs are ACCUMULATED into (+=), not overwritten.
// Input parameters:
//   nrow   : Number of rows in the matrix
//   ncol   : Number of columns in the matrix
//   mat    : Matrix, size >= nrow * ldm
//   ldm    : Leading dimension of the matrix, >= ncol
//   x_in_0 : Input vector 0
//   x_in_1 : Input vector 1
// Output parameters:
//   x_out_0 : Output vector 0, += mat   * x_in_0
//   x_out_1 : Output vector 1, += mat^T * x_in_1
void CBLAS_BI_GEMV(
    const int nrow, const int ncol, const DTYPE *mat, const int ldm,
    const DTYPE *x_in_0, const DTYPE *x_in_1, DTYPE *x_out_0, DTYPE *x_out_1
)
{
    // Main loop: two rows at a time, so each column of x_in_0 / slot of
    // x_out_1 is touched once per pair of rows.
    const int nrow_even = nrow - (nrow & 1);
    int i = 0;
    for (; i < nrow_even; i += 2)
    {
        const DTYPE *row0 = mat + (i + 0) * ldm;
        const DTYPE *row1 = mat + (i + 1) * ldm;
        const DTYPE w0 = x_in_1[i + 0];
        const DTYPE w1 = x_in_1[i + 1];
        DTYPE acc0 = 0, acc1 = 0;
        #pragma omp simd
        for (int j = 0; j < ncol; j++)
        {
            DTYPE xj = x_in_0[j];
            acc0 += row0[j] * xj;
            acc1 += row1[j] * xj;
            DTYPE t = w0 * row0[j];
            t += w1 * row1[j];
            x_out_1[j] += t;
        }
        x_out_0[i + 0] += acc0;
        x_out_0[i + 1] += acc1;
    }
    // Remainder: at most one leftover row when nrow is odd.
    for (; i < nrow; i++)
    {
        const DTYPE *row = mat + i * ldm;
        const DTYPE w = x_in_1[i];
        DTYPE acc = 0;
        #pragma omp simd
        for (int j = 0; j < ncol; j++)
        {
            acc += row[j] * x_in_0[j];
            x_out_1[j] += w * row[j];
        }
        x_out_0[i] += acc;
    }
}
// Initialize auxiliary array y0 used in H2 matvec forward transformation
void H2P_matvec_init_y0(H2Pack_p h2pack)
{
if (h2pack->y0 != NULL) return;
int n_node = h2pack->n_node;
h2pack->y0 = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
ASSERT_PRINTF(
h2pack->y0 != NULL,
"Failed to allocate %d H2P_dense_mat_t for H2 matvec buffer\n", n_node
);
H2P_dense_mat_p *y0 = h2pack->y0;
H2P_dense_mat_p *U = h2pack->U;
for (int node = 0; node < n_node; node++)
{
int ncol = U[node]->ncol;
if (ncol > 0)
{
H2P_dense_mat_init(&y0[node], ncol, 1);
} else {
H2P_dense_mat_init(&y0[node], 0, 0);
y0[node]->nrow = 0;
y0[node]->ncol = 0;
y0[node]->ld = 0;
}
}
}
// H2 matvec forward transformation, calculate U_j^T * x_j
// Bottom-up sweep over the cluster tree, from the deepest level (max_level)
// up to the minimum admissible level; the result for each node is stored in
// y0[node]. Nodes within one level are independent and processed in parallel.
void H2P_matvec_fwd_transform(H2Pack_p h2pack, const DTYPE *x)
{
    int n_thread = h2pack->n_thread;
    int max_child = h2pack->max_child;
    int n_leaf_node = h2pack->n_leaf_node;
    int max_level = h2pack->max_level;
    // HSS and H2 use different minimum admissible levels
    int min_adm_level = (h2pack->is_HSS) ? h2pack->HSS_min_adm_level : h2pack->min_adm_level;
    int *children = h2pack->children;
    int *n_child = h2pack->n_child;
    int *level_n_node = h2pack->level_n_node;
    int *level_nodes = h2pack->level_nodes;
    int *mat_cluster = h2pack->mat_cluster;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    // Lazily allocate y0 on first use
    H2P_matvec_init_y0(h2pack);
    H2P_dense_mat_p *y0 = h2pack->y0;
    H2P_dense_mat_p *U = h2pack->U;
    for (int i = max_level; i >= min_adm_level; i--)
    {
        int *level_i_nodes = level_nodes + i * n_leaf_node;
        int level_i_n_node = level_n_node[i];
        // Never spawn more threads than there are nodes on this level
        int n_thread_i = MIN(level_i_n_node, n_thread);
        #pragma omp parallel num_threads(n_thread_i)
        {
            int tid = omp_get_thread_num();
            thread_buf[tid]->timer = -get_wtime_sec();
            #pragma omp for schedule(dynamic) nowait
            for (int j = 0; j < level_i_n_node; j++)
            {
                int node = level_i_nodes[j];
                int n_child_node = n_child[node];
                H2P_dense_mat_p U_node = U[node];
                H2P_dense_mat_resize(y0[node], U_node->ncol, 1);
                if (n_child_node == 0)
                {
                    // Leaf node, directly calculate U_j^T * x_j
                    // mat_cluster[node*2] is the start offset of this node's
                    // segment in the multiplicand vector x
                    const DTYPE *x_spos = x + mat_cluster[node * 2];
                    CBLAS_GEMV(
                        CblasRowMajor, CblasTrans, U_node->nrow, U_node->ncol,
                        1.0, U_node->data, U_node->ld,
                        x_spos, 1, 0.0, y0[node]->data, 1
                    );
                } else {
                    // Non-leaf node, multiple U{node}^T with each child node y0 directly
                    // U[node] is partitioned row-wise by the children's y0 lengths;
                    // accumulate each child's contribution (beta = 0 only on the first)
                    int *node_children = children + node * max_child;
                    int U_srow = 0;
                    for (int k = 0; k < n_child_node; k++)
                    {
                        int child_k = node_children[k];
                        H2P_dense_mat_p y0_k = y0[child_k];
                        DTYPE *U_node_k = U_node->data + U_srow * U_node->ld;
                        DTYPE beta = (k == 0) ? 0.0 : 1.0;
                        CBLAS_GEMV(
                            CblasRowMajor, CblasTrans, y0_k->nrow, U_node->ncol,
                            1.0, U_node_k, U_node->ld, y0_k->data, 1, beta, y0[node]->data, 1
                        );
                        U_srow += y0_k->nrow;
                    }
                }  // End of "if (n_child_node == 0)"
            }  // End of j loop
            thread_buf[tid]->timer += get_wtime_sec();
        }  // End of "pragma omp parallel"
        if (h2pack->print_timers == 1)
        {
            double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
            for (int i = 0; i < n_thread_i; i++)
            {
                double thread_i_timer = thread_buf[i]->timer;
                avg_t += thread_i_timer;
                max_t = MAX(max_t, thread_i_timer);
                min_t = MIN(min_t, thread_i_timer);
            }
            avg_t /= (double) n_thread_i;
            INFO_PRINTF("Matvec forward transformation: level %d, %d/%d threads, %d nodes\n", i, n_thread_i, n_thread, level_i_n_node);
            INFO_PRINTF("    min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
        }
    }  // End of i loop
}
// Transpose each y0[i] in place: from a npt*krnl_dim-by-1 vector (viewed as
// an npt-by-krnl_dim matrix) to a krnl_dim-by-npt matrix, so tensor-kernel
// bi-matvec routines can load/store it with unit stride.
void H2P_transpose_y0_from_krnldim(H2Pack_p h2pack)
{
    int n_node   = h2pack->n_node;
    int n_thread = h2pack->n_thread;
    int krnl_dim = h2pack->krnl_dim;
    #pragma omp parallel num_threads(n_thread)
    {
        // Per-thread scratch buffer for the out-of-place transpose
        H2P_dense_mat_p scratch = h2pack->tb[omp_get_thread_num()]->mat0;
        #pragma omp for schedule(dynamic)
        for (int i = 0; i < n_node; i++)
        {
            H2P_dense_mat_p y0_i = h2pack->y0[i];
            if (y0_i->ld == 0) continue;  // Empty / unused node vector
            int len = y0_i->nrow;
            int npt = len / krnl_dim;
            H2P_dense_mat_resize(scratch, len, 1);
            H2P_transpose_dmat(1, npt, krnl_dim, y0_i->data, krnl_dim, scratch->data, npt);
            memcpy(y0_i->data, scratch->data, sizeof(DTYPE) * len);
        }
    }
}
// Transpose each y1[i] in place: from a krnl_dim-by-npt matrix back to a
// npt*krnl_dim-by-1 vector (viewed as an npt-by-krnl_dim matrix), undoing
// the layout used by the tensor-kernel bi-matvec routines.
void H2P_transpose_y1_to_krnldim(H2Pack_p h2pack)
{
    int n_node   = h2pack->n_node;
    int n_thread = h2pack->n_thread;
    int krnl_dim = h2pack->krnl_dim;
    #pragma omp parallel num_threads(n_thread)
    {
        // Per-thread scratch buffer for the out-of-place transpose
        H2P_dense_mat_p scratch = h2pack->tb[omp_get_thread_num()]->mat0;
        #pragma omp for schedule(dynamic)
        for (int i = 0; i < n_node; i++)
        {
            H2P_dense_mat_p y1_i = h2pack->y1[i];
            if (y1_i->ld == 0) continue;  // Node untouched in this sweep
            int len = y1_i->ncol;         // Row 0 of y1[i] holds the summed result
            int npt = len / krnl_dim;
            H2P_dense_mat_resize(scratch, len, 1);
            H2P_transpose_dmat(1, krnl_dim, npt, y1_i->data, npt, scratch->data, krnl_dim);
            memcpy(y1_i->data, scratch->data, sizeof(DTYPE) * len);
        }
    }
}
// Initialize auxiliary array y1 used in H2 matvec intermediate multiplication
// y1[i] is sized n_thread-by-ncol so each thread accumulates into its own row;
// rows are later reduced by H2P_matvec_sum_y1_thread().
void H2P_matvec_init_y1(H2Pack_p h2pack)
{
    int n_node = h2pack->n_node;
    int n_thread = h2pack->n_thread;
    // HSS uses the inadmissible-pair counts here, H2 the admissible-pair counts
    int *node_n_r_adm = (h2pack->is_HSS == 1) ? h2pack->node_n_r_inadm : h2pack->node_n_r_adm;
    H2P_dense_mat_p *U = h2pack->U;
    if (h2pack->y1 == NULL)
    {
        // First call: allocate the array of (empty) per-node matrices
        h2pack->y1 = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_node);
        ASSERT_PRINTF(
            h2pack->y1 != NULL,
            "Failed to allocate %d H2P_dense_mat_t for H2 matvec buffer\n", n_node
        );
        for (int i = 0; i < n_node; i++)
            H2P_dense_mat_init(&h2pack->y1[i], 0, 0);
    }
    H2P_dense_mat_p *y1 = h2pack->y1;
    // Use ld to mark if y1[i] is visited in this intermediate sweep
    // The first U[i]->ncol elements in y1[i]->data will be used in downward sweep
    for (int i = 0; i < n_node; i++)
    {
        y1[i]->ld = 0;
        // Only nodes that participate in at least one reduced (in)admissible
        // pair need a buffer; resize sets ld to a nonzero value
        if (node_n_r_adm[i]) H2P_dense_mat_resize(y1[i], n_thread, U[i]->ncol);
    }
    // Each thread set its y1 buffer to 0 (NUMA first touch)
    // Having every thread zero its own row places the page near the thread
    // that will write it during the intermediate sweep
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        for (int i = 0; i < n_node; i++)
        {
            if (y1[i]->ld == 0) continue;
            DTYPE *y1_i_thread = y1[i]->data + tid * y1[i]->ncol;
            memset(y1_i_thread, 0, sizeof(DTYPE) * y1[i]->ncol);
        }
    }
}
// Reduce the per-thread partial sums stored in the rows of each y1[i]
// (written during the intermediate sweep) into row 0, which the downward
// sweep then consumes.
void H2P_matvec_sum_y1_thread(H2Pack_p h2pack)
{
    int n_node   = h2pack->n_node;
    int n_thread = h2pack->n_thread;
    H2P_dense_mat_p *y1 = h2pack->y1;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        thread_buf[tid]->timer -= get_wtime_sec();
        #pragma omp for schedule(dynamic) nowait
        for (int node = 0; node < n_node; node++)
        {
            if (y1[node]->ld == 0) continue;  // Node untouched in this sweep
            int row_len = y1[node]->ncol;
            DTYPE *acc = y1[node]->data;      // Row 0 accumulates all other rows
            for (int t = 1; t < n_thread; t++)
            {
                const DTYPE *part = y1[node]->data + t * row_len;
                #pragma omp simd
                for (int k = 0; k < row_len; k++) acc[k] += part[k];
            }
        }
        thread_buf[tid]->timer += get_wtime_sec();
    }
}
// Perform the intermediate multiplications B_{ij} * (U_j^T * x_j) for every
// admissible pair in block i_blk, accumulating into this thread's y1 rows
// (compressed sides) and/or directly into the output vector y (leaf sides).
void H2P_matvec_intmd_mult_AOT_task_block(
    H2Pack_p h2pack, const int tid,
    const int i_blk, const DTYPE *x, DTYPE *y
)
{
    int *r_adm_pairs = (h2pack->is_HSS) ? h2pack->HSS_r_adm_pairs : h2pack->r_adm_pairs;
    int *node_level  = h2pack->node_level;
    int *mat_cluster = h2pack->mat_cluster;
    int *B_nrow      = h2pack->B_nrow;
    int *B_ncol      = h2pack->B_ncol;
    size_t *B_ptr    = h2pack->B_ptr;
    DTYPE  *B_data   = h2pack->B_data;
    H2P_int_vec_p B_blk = h2pack->B_blk;
    H2P_dense_mat_p *y0 = h2pack->y0;
    H2P_dense_mat_p *y1 = h2pack->y1;
    const int pair_s = B_blk->data[i_blk];
    const int pair_e = B_blk->data[i_blk + 1];
    for (int i = pair_s; i < pair_e; i++)
    {
        int node0  = r_adm_pairs[2 * i];
        int node1  = r_adm_pairs[2 * i + 1];
        int level0 = node_level[node0];
        int level1 = node_level[node1];
        DTYPE *Bi   = B_data + B_ptr[i];
        int Bi_nrow = B_nrow[i];
        int Bi_ncol = B_ncol[i];
        if (level0 == level1)
        {
            // (1) Same level: compressed on both sides, one bi-matvec updates
            //     the y1 buffers of both nodes
            DTYPE *y1_dst_0 = y1[node0]->data + tid * y1[node0]->ncol;
            DTYPE *y1_dst_1 = y1[node1]->data + tid * y1[node1]->ncol;
            CBLAS_BI_GEMV(
                Bi_nrow, Bi_ncol, Bi, Bi_ncol,
                y0[node1]->data, y0[node0]->data, y1_dst_0, y1_dst_1
            );
        } else if (level0 > level1) {
            // (2) node1 is a leaf at a higher level: its side is uncompressed,
            //     so read from x and accumulate into y directly; node0's side
            //     still goes through y0 / y1
            int vec_s1 = mat_cluster[node1 * 2];
            DTYPE *y1_dst_0 = y1[node0]->data + tid * y1[node0]->ncol;
            CBLAS_BI_GEMV(
                Bi_nrow, Bi_ncol, Bi, Bi_ncol,
                x + vec_s1, y0[node0]->data, y1_dst_0, y + vec_s1
            );
        } else {
            // (3) Mirror of case (2) with the roles of node0 / node1 swapped
            int vec_s0 = mat_cluster[node0 * 2];
            DTYPE *y1_dst_1 = y1[node1]->data + tid * y1[node1]->ncol;
            CBLAS_BI_GEMV(
                Bi_nrow, Bi_ncol, Bi, Bi_ncol,
                y0[node1]->data, x + vec_s0, y + vec_s0, y1_dst_1
            );
        }
    }  // End of i loop
}
// H2 matvec intermediate multiplication, calculate B_{ij} * (U_j^T * x_j)
// All B_{ij} matrices have been calculated and stored
void H2P_matvec_intmd_mult_AOT(H2Pack_p h2pack, const DTYPE *x)
{
    int n_thread = h2pack->n_thread;
    H2P_int_vec_p B_blk = h2pack->B_blk;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    // 1. Initialize y1
    H2P_matvec_init_y1(h2pack);
    // 2. Intermediate sweep
    // If (n_B_blk <= n_thread), B is constructed in H2Pack using a static workload
    // partitioning and NUMA first-touch optimization, we also use the same static
    // workload partitioning here for NUMA optimization. Otherwise, use OpenMP dynamic
    // scheduler for load balance.
    const int n_B_blk = B_blk->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        // y here is the thread's private output buffer, not the final output;
        // leaf-side contributions from the task blocks accumulate into it
        DTYPE *y = thread_buf[tid]->y;
        thread_buf[tid]->timer = -get_wtime_sec();
        if (n_B_blk <= n_thread)
        {
            // Static mapping: thread tid owns block tid (matches B construction)
            int i_blk = tid;
            if (i_blk < n_B_blk)
                H2P_matvec_intmd_mult_AOT_task_block(h2pack, tid, i_blk, x, y);
        } else {
            #pragma omp for schedule(dynamic) nowait
            for (int i_blk = 0; i_blk < n_B_blk; i_blk++)
                H2P_matvec_intmd_mult_AOT_task_block(h2pack, tid, i_blk, x, y);
        }
        thread_buf[tid]->timer += get_wtime_sec();
    }  // End of "pragma omp parallel"
    // 3. Sum thread-local buffers in y1
    H2P_matvec_sum_y1_thread(h2pack);
    if (h2pack->print_timers == 1)
    {
        double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
        for (int i = 0; i < n_thread; i++)
        {
            double thread_i_timer = thread_buf[i]->timer;
            avg_t += thread_i_timer;
            max_t = MAX(max_t, thread_i_timer);
            min_t = MIN(min_t, thread_i_timer);
        }
        avg_t /= (double) n_thread;
        INFO_PRINTF("Matvec intermediate multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
    }
}
// Extend the number of points to a multiple of SIMD_LEN and perform an n-body bi-matvec
// Input parameters:
//   coord0     : Matrix, size dim-by-ld0, coordinates of the 1st point set
//   ld0        : Leading dimension of coord0, should be >= n0
//   n0         : Number of points in coord0 (each column in coord0 is a coordinate)
//   coord1     : Matrix, size dim-by-ld1, coordinates of the 2nd point set
//   ld1        : Leading dimension of coord1, should be >= n1
//   n1         : Number of points in coord1 (each column in coord1 is a coordinate)
//   x_in_0     : Matrix, size >= krnl_dim * n1, will be left multiplied by kernel_matrix(coord0, coord1)
//   x_in_1     : Matrix, size >= krnl_dim * n0, will be left multiplied by kernel_matrix(coord1, coord0)
//   ldi0, ldi1 : Leading dimensions of x_in_0 and x_in_1
//   ldo0, ldo1 : Leading dimensions of x_out_0 and x_out_1
//   xpt_dim    : Dimension of extended point coordinate
//   krnl_dim   : Dimension of tensor kernel's return
//   workbuf    : H2P_dense_mat data structure for allocating working buffer
//   krnl_param : Pointer to kernel function parameter array
//   krnl_bimv  : Pointer to kernel matrix bi-matvec function
// Output parameter:
//   x_out_0 : Matrix, size >= krnl_dim * n0, x_out_0 += kernel_matrix(coord0, coord1) * x_in_0
//   x_out_1 : Matrix, size >= krnl_dim * n1, x_out_1 += kernel_matrix(coord1, coord0) * x_in_1
// Note:
//   For x_{in,out}_{0,1}, they are not stored as the original (n{0,1} * krnl_dim)-by-1 column vector,
//   which can be viewed as n{0,1}-by-krnl_dim matrices. Instead, they are stored as krnl_dim-by-n{0,1}
//   matrices so the krnl_bimv can vectorize the load and store.
void H2P_ext_krnl_bimv(
    const DTYPE *coord0, const int ld0, const int n0,
    const DTYPE *coord1, const int ld1, const int n1,
    const DTYPE *x_in_0, const DTYPE *x_in_1, DTYPE *x_out_0, DTYPE *x_out_1,
    const int ldi0, const int ldi1, const int ldo0, const int ldo1,
    const int xpt_dim, const int krnl_dim, H2P_dense_mat_p workbuf,
    const void *krnl_param, kernel_bimv_fptr krnl_bimv
)
{
    // Round both point counts up to a multiple of SIMD_LEN
    int n0_ext  = (n0 + SIMD_LEN - 1) / SIMD_LEN * SIMD_LEN;
    int n1_ext  = (n1 + SIMD_LEN - 1) / SIMD_LEN * SIMD_LEN;
    int n01_ext = n0_ext + n1_ext;
    // One flat buffer partitioned into: trg coords, src coords, padded copies
    // of both input vectors, and zeroed buffers for both output vectors
    int buf_size = (xpt_dim + krnl_dim) * n01_ext * 2;
    H2P_dense_mat_resize(workbuf, 1, buf_size);
    DTYPE *trg_coord = workbuf->data;
    DTYPE *src_coord = trg_coord + xpt_dim * n0_ext;
    DTYPE *x_in_0_   = src_coord + xpt_dim * n1_ext;
    DTYPE *x_in_1_   = x_in_0_   + n1_ext * krnl_dim;
    DTYPE *x_out_0_  = x_in_1_   + n0_ext * krnl_dim;
    DTYPE *x_out_1_  = x_out_0_  + n0_ext * krnl_dim;
    // Copy coordinates and pad the extend part
    for (int i = 0; i < xpt_dim; i++)
    {
        const DTYPE *c0_src = coord0 + i * ld0;
        const DTYPE *c1_src = coord1 + i * ld1;
        DTYPE *c0_dst = trg_coord + i * n0_ext;
        DTYPE *c1_dst = src_coord + i * n1_ext;
        memcpy(c0_dst, c0_src, sizeof(DTYPE) * n0);
        memcpy(c1_dst, c1_src, sizeof(DTYPE) * n1);
        for (int j = n0; j < n0_ext; j++) c0_dst[j] = 0;
        for (int j = n1; j < n1_ext; j++) c1_dst[j] = 0;
    }
    // Copy input vectors and initialize output vectors
    // Must set the last n{0,1}_ext - n{0,1} elements in each row to 0,
    // otherwise tensor kernel results might be incorrect
    for (int i = 0; i < krnl_dim; i++)
    {
        const DTYPE *src = x_in_0 + i * ldi0;
        DTYPE *dst = x_in_0_ + i * n1_ext;
        memcpy(dst, src, sizeof(DTYPE) * n1);
        for (int j = n1; j < n1_ext; j++) dst[j] = 0;
    }
    memset(x_out_0_, 0, sizeof(DTYPE) * n0_ext * krnl_dim);
    for (int i = 0; i < krnl_dim; i++)
    {
        const DTYPE *src = x_in_1 + i * ldi1;
        DTYPE *dst = x_in_1_ + i * n0_ext;
        memcpy(dst, src, sizeof(DTYPE) * n0);
        for (int j = n0; j < n0_ext; j++) dst[j] = 0;
    }
    memset(x_out_1_, 0, sizeof(DTYPE) * n1_ext * krnl_dim);
    // Do the n-body bi-matvec
    krnl_bimv(
        trg_coord, n0_ext, n0_ext,
        src_coord, n1_ext, n1_ext,
        krnl_param, x_in_0_, x_in_1_, x_out_0_, x_out_1_
    );
    // Add results back to original output vectors (only the first n{0,1}
    // entries of each row; the padded tail is discarded)
    for (int i = 0; i < krnl_dim; i++)
    {
        DTYPE *dst = x_out_0 + i * ldo0;
        DTYPE *src = x_out_0_ + i * n0_ext;
        #pragma omp simd
        for (int j = 0; j < n0; j++) dst[j] += src[j];
    }
    for (int i = 0; i < krnl_dim; i++)
    {
        DTYPE *dst = x_out_1 + i * ldo1;
        DTYPE *src = x_out_1_ + i * n1_ext;
        #pragma omp simd
        for (int j = 0; j < n1; j++) dst[j] += src[j];
    }
}
// Evaluate a kernel matrix block row-band by row-band, and perform a bi-matvec
// with each band as soon as it is evaluated (so matbuf stays cache-sized).
// Input parameters:
//   coord0      : Matrix, size dim-by-ld0, coordinates of the 1st point set
//   ld0         : Leading dimension of coord0, should be >= n0
//   n0          : Number of points in coord0 (each column is a coordinate)
//   coord1      : Matrix, size dim-by-ld1, coordinates of the 2nd point set
//   ld1         : Leading dimension of coord1, should be >= n1
//   n1          : Number of points in coord1 (each column is a coordinate)
//   x_in_0      : Vector, size >= n1 * krnl_dim, multiplied by kernel_matrix(coord0, coord1)
//   x_in_1      : Vector, size >= n0 * krnl_dim, multiplied by kernel_matrix(coord1, coord0)
//   krnl_dim    : Dimension of tensor kernel's return
//   npt_row_blk : Number of coord0 points handled per band
//   matbuf      : Buffer of at least npt_row_blk*krnl_dim rows * n1*krnl_dim columns
//   krnl_param  : Pointer to kernel function parameter array
//   krnl_eval   : Pointer to kernel matrix evaluation function
// Output parameters:
//   x_out_0 : Vector, size >= n0 * krnl_dim, x_out_0 += kernel_matrix(coord0, coord1) * x_in_0
//   x_out_1 : Vector, size >= n1 * krnl_dim, x_out_1 += kernel_matrix(coord1, coord0) * x_in_1
void H2P_krnl_eval_bimv(
    const DTYPE *coord0, const int ld0, const int n0,
    const DTYPE *coord1, const int ld1, const int n1,
    const DTYPE *x_in_0, const DTYPE *x_in_1, DTYPE *x_out_0, DTYPE *x_out_1,
    const int krnl_dim, const int npt_row_blk, DTYPE *matbuf,
    const void *krnl_param, kernel_eval_fptr krnl_eval
)
{
    const int ldm = n1 * krnl_dim;  // Row length of the full kernel block
    int pt_s = 0;
    while (pt_s < n0)
    {
        int pt_e = pt_s + npt_row_blk;
        if (pt_e > n0) pt_e = n0;
        int npt  = pt_e - pt_s;
        int srow = pt_s * krnl_dim;
        int nrow = npt  * krnl_dim;
        // Evaluate rows [srow, srow + nrow) of kernel_matrix(coord0, coord1)
        krnl_eval(
            coord0 + pt_s, ld0, npt,
            coord1, ld1, n1, krnl_param, matbuf, ldm
        );
        // x_out_0(srow:) += band * x_in_0;  x_out_1 += band^T * x_in_1(srow:)
        CBLAS_BI_GEMV(
            nrow, ldm, matbuf, ldm,
            x_in_0, x_in_1 + srow,
            x_out_0 + srow, x_out_1
        );
        pt_s = pt_e;
    }
}
// H2 matvec intermediate multiplication, calculate B_{ij} * (U_j^T * x_j)
// JIT version: B_{ij} blocks are (re)evaluated from the kernel on the fly
// instead of being read from storage. Needs krnl_eval (and optionally the
// faster krnl_bimv) to be set.
void H2P_matvec_intmd_mult_JIT(H2Pack_p h2pack, const DTYPE *x)
{
    int xpt_dim = h2pack->xpt_dim;
    int krnl_dim = h2pack->krnl_dim;
    int n_point = h2pack->n_point;
    int n_thread = h2pack->n_thread;
    int *r_adm_pairs = (h2pack->is_HSS) ? h2pack->HSS_r_adm_pairs : h2pack->r_adm_pairs;
    int *node_level = h2pack->node_level;
    int *pt_cluster = h2pack->pt_cluster;
    int *mat_cluster = h2pack->mat_cluster;
    int *B_nrow = h2pack->B_nrow;
    int *B_ncol = h2pack->B_ncol;
    DTYPE *coord = h2pack->coord;
    void *krnl_param = h2pack->krnl_param;
    H2P_int_vec_p B_blk = h2pack->B_blk;
    H2P_dense_mat_p *y0 = h2pack->y0;
    H2P_dense_mat_p *J_coord = h2pack->J_coord;
    kernel_eval_fptr krnl_eval = h2pack->krnl_eval;
    kernel_bimv_fptr krnl_bimv = h2pack->krnl_bimv;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    // 1. Initialize y1
    H2P_matvec_init_y1(h2pack);
    H2P_dense_mat_p *y1 = h2pack->y1;
    // 2. Intermediate sweep
    const int n_B_blk = B_blk->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        // Bi: buffer for JIT-evaluated kernel bands (krnl_eval path)
        H2P_dense_mat_p Bi = thread_buf[tid]->mat0;
        // y: thread-private output buffer for leaf-side contributions
        DTYPE *y = thread_buf[tid]->y;
        // workbuf: padding/scratch buffer for the krnl_bimv path
        H2P_dense_mat_p workbuf = thread_buf[tid]->mat1;
        thread_buf[tid]->timer = -get_wtime_sec();
        #pragma omp for schedule(dynamic) nowait
        for (int i_blk = 0; i_blk < n_B_blk; i_blk++)
        {
            int B_blk_s = B_blk->data[i_blk];
            int B_blk_e = B_blk->data[i_blk + 1];
            for (int i = B_blk_s; i < B_blk_e; i++)
            {
                int node0 = r_adm_pairs[2 * i];
                int node1 = r_adm_pairs[2 * i + 1];
                int level0 = node_level[node0];
                int level1 = node_level[node1];
                int Bi_nrow = B_nrow[i];
                int Bi_ncol = B_ncol[i];
                // Size the evaluation buffer so one band fits in ~128 KB,
                // rounded down to a whole number of points
                int Bi_nrow_128KB = (128 * 1024) / (sizeof(DTYPE) * Bi_ncol);
                int Bi_blk_npt = Bi_nrow_128KB / krnl_dim;
                Bi_nrow_128KB = Bi_blk_npt * krnl_dim;
                H2P_dense_mat_resize(Bi, Bi_nrow_128KB, Bi_ncol);
                // (1) Two nodes are of the same level, compress on both sides
                if (level0 == level1)
                {
                    int ncol0 = y1[node0]->ncol;
                    int ncol1 = y1[node1]->ncol;
                    DTYPE *y1_dst_0 = y1[node0]->data + tid * ncol0;
                    DTYPE *y1_dst_1 = y1[node1]->data + tid * ncol1;
                    // Both sides use the skeleton point coordinates J_coord
                    if (krnl_bimv != NULL)
                    {
                        int node0_npt = Bi_nrow / krnl_dim;
                        int node1_npt = Bi_ncol / krnl_dim;
                        H2P_ext_krnl_bimv(
                            J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol,
                            J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol,
                            y0[node1]->data, y0[node0]->data, y1_dst_0, y1_dst_1,
                            node1_npt, node0_npt, node0_npt, node1_npt,
                            xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                        );
                    } else {
                        H2P_krnl_eval_bimv(
                            J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol,
                            J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol,
                            y0[node1]->data, y0[node0]->data, y1_dst_0, y1_dst_1,
                            krnl_dim, Bi_blk_npt, Bi->data, krnl_param, krnl_eval
                        );
                    }
                }
                // (2) node1 is a leaf node and its level is higher than node0's level,
                //     only compressed on node0's side, node1's side don't need the
                //     downward sweep and can directly accumulate result to output vector
                if (level0 > level1)
                {
                    int pt_s1 = pt_cluster[node1 * 2];
                    int node1_npt = pt_cluster[node1 * 2 + 1] - pt_s1 + 1;
                    int vec_s1 = mat_cluster[node1 * 2];
                    int ncol0 = y1[node0]->ncol;
                    DTYPE *y1_dst_0 = y1[node0]->data + tid * ncol0;
                    // Note: the two paths index x/y differently — the bimv path
                    // uses krnl_dim-by-n layout (point offset pt_s1, ld n_point),
                    // the eval path uses the flat vector layout (offset vec_s1)
                    if (krnl_bimv != NULL)
                    {
                        const DTYPE *x_spos = x + pt_s1;
                        DTYPE *y_spos = y + pt_s1;
                        int node0_npt = Bi_nrow / krnl_dim;
                        H2P_ext_krnl_bimv(
                            J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol,
                            coord + pt_s1, n_point, node1_npt,
                            x_spos, y0[node0]->data, y1_dst_0, y_spos,
                            n_point, node0_npt, node0_npt, n_point,
                            xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                        );
                    } else {
                        const DTYPE *x_spos = x + vec_s1;
                        DTYPE *y_spos = y + vec_s1;
                        H2P_krnl_eval_bimv(
                            J_coord[node0]->data, J_coord[node0]->ncol, J_coord[node0]->ncol,
                            coord + pt_s1, n_point, node1_npt,
                            x_spos, y0[node0]->data, y1_dst_0, y_spos,
                            krnl_dim, Bi_blk_npt, Bi->data, krnl_param, krnl_eval
                        );
                    }
                }
                // (3) node0 is a leaf node and its level is higher than node1's level,
                //     only compressed on node1's side, node0's side don't need the
                //     downward sweep and can directly accumulate result to output vector
                if (level0 < level1)
                {
                    int pt_s0 = pt_cluster[node0 * 2];
                    int node0_npt = pt_cluster[node0 * 2 + 1] - pt_s0 + 1;
                    int vec_s0 = mat_cluster[node0 * 2];
                    int ncol1 = y1[node1]->ncol;
                    DTYPE *y1_dst_1 = y1[node1]->data + tid * ncol1;
                    if (krnl_bimv != NULL)
                    {
                        const DTYPE *x_spos = x + pt_s0;
                        DTYPE *y_spos = y + pt_s0;
                        int node1_npt = Bi_ncol / krnl_dim;
                        H2P_ext_krnl_bimv(
                            coord + pt_s0, n_point, node0_npt,
                            J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol,
                            y0[node1]->data, x_spos, y_spos, y1_dst_1,
                            node1_npt, n_point, n_point, node1_npt,
                            xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                        );
                    } else {
                        const DTYPE *x_spos = x + vec_s0;
                        DTYPE *y_spos = y + vec_s0;
                        H2P_krnl_eval_bimv(
                            coord + pt_s0, n_point, node0_npt,
                            J_coord[node1]->data, J_coord[node1]->ncol, J_coord[node1]->ncol,
                            y0[node1]->data, x_spos, y_spos, y1_dst_1,
                            krnl_dim, Bi_blk_npt, Bi->data, krnl_param, krnl_eval
                        );
                    }
                }
            }  // End of i loop
        }  // End of i_blk loop
        thread_buf[tid]->timer += get_wtime_sec();
    }  // End of "pragma omp parallel"
    // 3. Sum thread-local buffers in y1
    H2P_matvec_sum_y1_thread(h2pack);
    if (h2pack->print_timers == 1)
    {
        double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
        for (int i = 0; i < n_thread; i++)
        {
            double thread_i_timer = thread_buf[i]->timer;
            avg_t += thread_i_timer;
            max_t = MAX(max_t, thread_i_timer);
            min_t = MIN(min_t, thread_i_timer);
        }
        avg_t /= (double) n_thread;
        INFO_PRINTF("Matvec intermediate multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
    }
}
// H2 matvec backward transformation, calculate U_i * (B_{ij} * (U_j^T * x_j))
// Top-down sweep (min_adm_level --> max_level): each visited node expands its
// y1 result through U; leaves accumulate into y, non-leaves push the pieces
// down into their children's y1.
void H2P_matvec_bwd_transform(H2Pack_p h2pack, const DTYPE *x, DTYPE *y)
{
    int n_thread = h2pack->n_thread;
    int max_child = h2pack->max_child;
    int n_leaf_node = h2pack->n_leaf_node;
    int max_level = h2pack->max_level;
    int min_adm_level = (h2pack->is_HSS) ? h2pack->HSS_min_adm_level : h2pack->min_adm_level;
    int *children = h2pack->children;
    int *n_child = h2pack->n_child;
    int *level_n_node = h2pack->level_n_node;
    int *level_nodes = h2pack->level_nodes;
    int *mat_cluster = h2pack->mat_cluster;
    H2P_dense_mat_p *U = h2pack->U;
    H2P_dense_mat_p *y1 = h2pack->y1;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    for (int i = min_adm_level; i <= max_level; i++)
    {
        int *level_i_nodes = level_nodes + i * n_leaf_node;
        int level_i_n_node = level_n_node[i];
        int n_thread_i = MIN(level_i_n_node, n_thread);
        #pragma omp parallel num_threads(n_thread_i)
        {
            int tid = omp_get_thread_num();
            // Scratch for the expanded vector U[node] * y1[node]
            H2P_dense_mat_p y1_tmp = thread_buf[tid]->mat0;
            thread_buf[tid]->timer = -get_wtime_sec();
            #pragma omp for schedule(dynamic) nowait
            for (int j = 0; j < level_i_n_node; j++)
            {
                int node = level_i_nodes[j];
                int n_child_node = n_child[node];
                int *child_nodes = children + node * max_child;
                // ld == 0: this node received no intermediate contribution
                if (y1[node]->ld == 0) continue;
                H2P_dense_mat_resize(y1_tmp, U[node]->nrow, 1);
                CBLAS_GEMV(
                    CblasRowMajor, CblasNoTrans, U[node]->nrow, U[node]->ncol,
                    1.0, U[node]->data, U[node]->ld,
                    y1[node]->data, 1, 0.0, y1_tmp->data, 1
                );
                if (n_child_node == 0)
                {
                    // Leaf node, accumulate final results to output vector
                    int s_index = mat_cluster[2 * node];
                    int e_index = mat_cluster[2 * node + 1];
                    int n_point = e_index - s_index + 1;
                    DTYPE *y_spos = y + s_index;
                    #pragma omp simd
                    for (int k = 0; k < n_point; k++)
                        y_spos[k] += y1_tmp->data[k];
                } else {
                    // Non-leaf node, push down y1 values
                    // y1_tmp is partitioned by the children's U column counts
                    int y1_tmp_idx = 0;
                    for (int k = 0; k < n_child_node; k++)
                    {
                        int child_k = child_nodes[k];
                        int child_k_len = U[child_k]->ncol;
                        DTYPE *y1_tmp_spos = y1_tmp->data + y1_tmp_idx;
                        if (y1[child_k]->ld == 0)
                        {
                            // Child had no contribution yet: copy (also sets
                            // ld != 0 via the resize, marking it visited)
                            H2P_dense_mat_resize(y1[child_k], child_k_len, 1);
                            memcpy(y1[child_k]->data, y1_tmp_spos, sizeof(DTYPE) * child_k_len);
                        } else {
                            // Child already has a contribution: accumulate
                            #pragma omp simd
                            for (int l = 0; l < child_k_len; l++)
                                y1[child_k]->data[l] += y1_tmp_spos[l];
                        }
                        y1_tmp_idx += child_k_len;
                    }
                }  // End of "if (n_child_node == 0)"
            }  // End of j loop
            thread_buf[tid]->timer += get_wtime_sec();
        }  // End of "pragma omp parallel"
        if (h2pack->print_timers == 1)
        {
            double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
            for (int i = 0; i < n_thread_i; i++)
            {
                double thread_i_timer = thread_buf[i]->timer;
                avg_t += thread_i_timer;
                max_t = MAX(max_t, thread_i_timer);
                min_t = MIN(min_t, thread_i_timer);
            }
            avg_t /= (double) n_thread_i;
            INFO_PRINTF("Matvec backward transformation: level %d, %d/%d threads, %d nodes\n", i, n_thread_i, n_thread, level_i_n_node);
            INFO_PRINTF("    min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
        }  // End of "if (h2pack->print_timers == 1)"
    }  // End of i loop
}
// Apply one block of diagonal (leaf-node self-interaction) dense matrices:
// y_i += D_{ii} * x_i for every leaf node i assigned to block i_blk0.
// tid is unused here but kept for signature parity with the part-1 routine.
void H2P_matvec_dense_mult0_AOT_task_block(
    H2Pack_p h2pack, const int tid,
    const int i_blk0, const DTYPE *x, DTYPE *y
)
{
    int *leaf_nodes  = h2pack->height_nodes;
    int *mat_cluster = h2pack->mat_cluster;
    int *D_nrow      = h2pack->D_nrow;
    int *D_ncol      = h2pack->D_ncol;
    size_t *D_ptr    = h2pack->D_ptr;
    DTYPE  *D_data   = h2pack->D_data;
    H2P_int_vec_p D_blk0 = h2pack->D_blk0;
    const int blk_s = D_blk0->data[i_blk0];
    const int blk_e = D_blk0->data[i_blk0 + 1];
    for (int i = blk_s; i < blk_e; i++)
    {
        int node    = leaf_nodes[i];
        int vec_s   = mat_cluster[node * 2];
        int Di_nrow = D_nrow[i];
        int Di_ncol = D_ncol[i];
        DTYPE *Di   = D_data + D_ptr[i];
        // y(vec_s : vec_s + Di_nrow) += D_i * x(vec_s : vec_s + Di_ncol)
        CBLAS_GEMV(
            CblasRowMajor, CblasNoTrans, Di_nrow, Di_ncol,
            1.0, Di, Di_ncol, x + vec_s, 1, 1.0, y + vec_s, 1
        );
    }
}
// Apply one block of off-diagonal (inadmissible-pair) dense matrices:
// for each pair (node0, node1) in block i_blk1, one bi-matvec performs
// y_0 += D_{01} * x_1 and y_1 += D_{01}^T * x_0 in a single pass over D.
void H2P_matvec_dense_mult1_AOT_task_block(
    H2Pack_p h2pack, const int tid,
    const int i_blk1, const DTYPE *x, DTYPE *y
)
{
    int n_leaf_node = h2pack->n_leaf_node;
    int *r_inadm_pairs = (h2pack->is_HSS) ? h2pack->HSS_r_inadm_pairs : h2pack->r_inadm_pairs;
    int *mat_cluster = h2pack->mat_cluster;
    int *D_nrow      = h2pack->D_nrow;
    int *D_ncol      = h2pack->D_ncol;
    size_t *D_ptr    = h2pack->D_ptr;
    DTYPE  *D_data   = h2pack->D_data;
    H2P_int_vec_p D_blk1 = h2pack->D_blk1;
    const int blk_s = D_blk1->data[i_blk1];
    const int blk_e = D_blk1->data[i_blk1 + 1];
    for (int i = blk_s; i < blk_e; i++)
    {
        // Off-diagonal D matrices are stored after the n_leaf_node diagonal ones
        const int Di_idx = n_leaf_node + i;
        int node0   = r_inadm_pairs[2 * i];
        int node1   = r_inadm_pairs[2 * i + 1];
        int vec_s0  = mat_cluster[2 * node0];
        int vec_s1  = mat_cluster[2 * node1];
        int Di_nrow = D_nrow[Di_idx];
        int Di_ncol = D_ncol[Di_idx];
        DTYPE *Di   = D_data + D_ptr[Di_idx];
        CBLAS_BI_GEMV(
            Di_nrow, Di_ncol, Di, Di_ncol,
            x + vec_s1, x + vec_s0, y + vec_s0, y + vec_s1
        );
    }
}
// H2 matvec dense multiplication, calculate D_{ij} * x_j
// All D_{ij} matrices have been calculated and stored
void H2P_matvec_dense_mult_AOT(H2Pack_p h2pack, const DTYPE *x)
{
    int n_thread = h2pack->n_thread;
    H2P_int_vec_p D_blk0 = h2pack->D_blk0;
    H2P_int_vec_p D_blk1 = h2pack->D_blk1;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    // If (n_D0_blk <= n_thread) or (n_D1_blk <= n_thread), D is constructed in
    // H2Pack using a static workload partitioning and NUMA first-touch optimization,
    // we also use the same static workload partitioning here for NUMA optimization.
    // Otherwise, use OpenMP dynamic scheduler for load balance.
    const int n_D0_blk = D_blk0->length - 1;
    const int n_D1_blk = D_blk1->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        // Thread-private output buffer; contributions are accumulated here
        DTYPE *y = thread_buf[tid]->y;
        thread_buf[tid]->timer = -get_wtime_sec();
        // 1. Diagonal blocks matvec
        if (n_D0_blk <= n_thread)
        {
            // Static mapping: thread tid owns block tid (matches D construction)
            int i_blk0 = tid;
            if (i_blk0 < n_D0_blk)
                H2P_matvec_dense_mult0_AOT_task_block(h2pack, tid, i_blk0, x, y);
        } else {
            #pragma omp for schedule(dynamic) nowait
            for (int i_blk0 = 0; i_blk0 < n_D0_blk; i_blk0++)
                H2P_matvec_dense_mult0_AOT_task_block(h2pack, tid, i_blk0, x, y);
        }  // End of "if (n_D0_blk-1 <= n_thread)"
        // 2. Off-diagonal blocks from inadmissible pairs matvec
        if (n_D1_blk <= n_thread)
        {
            int i_blk1 = tid;
            if (i_blk1 < n_D1_blk)
                H2P_matvec_dense_mult1_AOT_task_block(h2pack, tid, i_blk1, x, y);
        } else {
            #pragma omp for schedule(dynamic) nowait
            for (int i_blk1 = 0; i_blk1 < n_D1_blk; i_blk1++)
                H2P_matvec_dense_mult1_AOT_task_block(h2pack, tid, i_blk1, x, y);
        }  // End of "if (n_D1_blk-1 <= n_thread)"
        thread_buf[tid]->timer += get_wtime_sec();
    }  // End of "pragma omp parallel"
    if (h2pack->print_timers == 1)
    {
        double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
        for (int i = 0; i < n_thread; i++)
        {
            double thread_i_timer = thread_buf[i]->timer;
            avg_t += thread_i_timer;
            max_t = MAX(max_t, thread_i_timer);
            min_t = MIN(min_t, thread_i_timer);
        }
        avg_t /= (double) n_thread;
        INFO_PRINTF("Matvec dense multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
    }
}
// H2 matvec dense multiplication, calculate D_{ij} * x_j
// JIT version: D_{ij} blocks are evaluated from the kernel on the fly via
// H2P_krnl_eval_bimv / H2P_ext_krnl_bimv instead of being read from storage.
void H2P_matvec_dense_mult_JIT(H2Pack_p h2pack, const DTYPE *x)
{
    int n_thread = h2pack->n_thread;
    int xpt_dim = h2pack->xpt_dim;
    int krnl_dim = h2pack->krnl_dim;
    int n_point = h2pack->n_point;
    int n_leaf_node = h2pack->n_leaf_node;
    int *r_inadm_pairs = (h2pack->is_HSS) ? h2pack->HSS_r_inadm_pairs : h2pack->r_inadm_pairs;
    int *leaf_nodes = h2pack->height_nodes;
    int *pt_cluster = h2pack->pt_cluster;
    int *mat_cluster = h2pack->mat_cluster;
    int *D_ncol = h2pack->D_ncol;
    DTYPE *coord = h2pack->coord;
    void *krnl_param = h2pack->krnl_param;
    H2P_int_vec_p D_blk0 = h2pack->D_blk0;
    H2P_int_vec_p D_blk1 = h2pack->D_blk1;
    kernel_eval_fptr krnl_eval = h2pack->krnl_eval;
    kernel_bimv_fptr krnl_bimv = h2pack->krnl_bimv;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    const int n_D0_blk = D_blk0->length - 1;
    const int n_D1_blk = D_blk1->length - 1;
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        // NOTE(review): Di and tmp both alias thread_buf[tid]->mat0, so in the
        // krnl_eval branch below matbuf (Di->data) and the discarded x_out_1
        // buffer (tmp->data) share storage — presumably intentional since
        // x_out_1 is thrown away, but verify against upstream H2Pack.
        H2P_dense_mat_p Di = thread_buf[tid]->mat0;
        H2P_dense_mat_p tmp = thread_buf[tid]->mat0;
        DTYPE *y = thread_buf[tid]->y;
        H2P_dense_mat_p workbuf = thread_buf[tid]->mat1;
        thread_buf[tid]->timer = -get_wtime_sec();
        // 1. Diagonal blocks matvec
        #pragma omp for schedule(dynamic) nowait
        for (int i_blk0 = 0; i_blk0 < n_D0_blk; i_blk0++)
        {
            int D_blk0_s = D_blk0->data[i_blk0];
            int D_blk0_e = D_blk0->data[i_blk0 + 1];
            for (int i = D_blk0_s; i < D_blk0_e; i++)
            {
                int node = leaf_nodes[i];
                int pt_s = pt_cluster[node * 2];
                int vec_s = mat_cluster[node * 2];
                int node_npt = pt_cluster[node * 2 + 1] - pt_s + 1;
                H2P_dense_mat_resize(tmp, node_npt * krnl_dim, 1);
                // Discard x_out_1 stored in tmp->data
                if (krnl_bimv != NULL)
                {
                    DTYPE *y_spos = y + pt_s;
                    const DTYPE *x_spos = x + pt_s;
                    H2P_ext_krnl_bimv(
                        coord + pt_s, n_point, node_npt,
                        coord + pt_s, n_point, node_npt,
                        x_spos, x_spos, y_spos, tmp->data,
                        n_point, 0, n_point, 0, // ldi1 and ldo1 need to be 0 here!
                        xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                    );
                } else {
                    DTYPE *y_spos = y + vec_s;
                    const DTYPE *x_spos = x + vec_s;
                    // Size the evaluation buffer so one row band fits in ~128 KB
                    int Di_ncol = D_ncol[i];
                    int Di_nrow_128KB = (128 * 1024) / (sizeof(DTYPE) * Di_ncol);
                    int Di_blk_npt = Di_nrow_128KB / krnl_dim;
                    Di_nrow_128KB = Di_blk_npt * krnl_dim;
                    H2P_dense_mat_resize(Di, Di_nrow_128KB, Di_ncol);
                    H2P_krnl_eval_bimv(
                        coord + pt_s, n_point, node_npt,
                        coord + pt_s, n_point, node_npt,
                        x_spos, x_spos, y_spos, tmp->data,
                        krnl_dim, Di_blk_npt, Di->data, krnl_param, krnl_eval
                    );
                }
            }
        }  // End of i_blk0 loop
        // 2. Off-diagonal blocks from inadmissible pairs matvec
        #pragma omp for schedule(dynamic) nowait
        for (int i_blk1 = 0; i_blk1 < n_D1_blk; i_blk1++)
        {
            int D_blk1_s = D_blk1->data[i_blk1];
            int D_blk1_e = D_blk1->data[i_blk1 + 1];
            for (int i = D_blk1_s; i < D_blk1_e; i++)
            {
                int node0 = r_inadm_pairs[2 * i];
                int node1 = r_inadm_pairs[2 * i + 1];
                int pt_s0 = pt_cluster[2 * node0];
                int pt_s1 = pt_cluster[2 * node1];
                int vec_s0 = mat_cluster[2 * node0];
                int vec_s1 = mat_cluster[2 * node1];
                int node0_npt = pt_cluster[2 * node0 + 1] - pt_s0 + 1;
                int node1_npt = pt_cluster[2 * node1 + 1] - pt_s1 + 1;
                // bimv path indexes by point offset (krnl_dim-by-n layout);
                // eval path indexes by flat vector offset
                if (krnl_bimv != NULL)
                {
                    DTYPE *y_spos0 = y + pt_s0;
                    DTYPE *y_spos1 = y + pt_s1;
                    const DTYPE *x_spos0 = x + pt_s0;
                    const DTYPE *x_spos1 = x + pt_s1;
                    H2P_ext_krnl_bimv(
                        coord + pt_s0, n_point, node0_npt,
                        coord + pt_s1, n_point, node1_npt,
                        x_spos1, x_spos0, y_spos0, y_spos1,
                        n_point, n_point, n_point, n_point,
                        xpt_dim, krnl_dim, workbuf, krnl_param, krnl_bimv
                    );
                } else {
                    DTYPE *y_spos0 = y + vec_s0;
                    DTYPE *y_spos1 = y + vec_s1;
                    const DTYPE *x_spos0 = x + vec_s0;
                    const DTYPE *x_spos1 = x + vec_s1;
                    int Di_ncol = D_ncol[n_leaf_node + i];
                    int Di_nrow_128KB = (128 * 1024) / (sizeof(DTYPE) * Di_ncol);
                    int Di_blk_npt = Di_nrow_128KB / krnl_dim;
                    Di_nrow_128KB = Di_blk_npt * krnl_dim;
                    H2P_dense_mat_resize(Di, Di_nrow_128KB, Di_ncol);
                    H2P_krnl_eval_bimv(
                        coord + pt_s0, n_point, node0_npt,
                        coord + pt_s1, n_point, node1_npt,
                        x_spos1, x_spos0, y_spos0, y_spos1,
                        krnl_dim, Di_blk_npt, Di->data, krnl_param, krnl_eval
                    );
                }
            }
        }  // End of i_blk1 loop
        thread_buf[tid]->timer += get_wtime_sec();
    }  // End of "pragma omp parallel"
    if (h2pack->print_timers == 1)
    {
        double max_t = 0.0, avg_t = 0.0, min_t = 19241112.0;
        for (int i = 0; i < n_thread; i++)
        {
            double thread_i_timer = thread_buf[i]->timer;
            avg_t += thread_i_timer;
            max_t = MAX(max_t, thread_i_timer);
            min_t = MIN(min_t, thread_i_timer);
        }
        avg_t /= (double) n_thread;
        INFO_PRINTF("Matvec dense multiplication: min/avg/max thread wall-time = %.3lf, %.3lf, %.3lf (s)\n", min_t, avg_t, max_t);
    }
}
// Permute the multiplicand vector from the original point ordering to the
// sorted point ordering inside H2Pack.
// Input:  h2pack : initialized H2Pack structure (provides krnl_mat_size and fwd_pmt_idx)
//         x      : length krnl_mat_size, vector in the original ordering
// Output: pmt_x  : length krnl_mat_size, permuted vector
// Elements are gathered through h2pack->fwd_pmt_idx (presumably
// pmt_x[i] = x[fwd_pmt_idx[i]] — confirm against gather_vector_elements).
void H2P_permute_vector_forward(H2Pack_p h2pack, const DTYPE *x, DTYPE *pmt_x)
{
    gather_vector_elements(sizeof(DTYPE), h2pack->krnl_mat_size, h2pack->fwd_pmt_idx, x, pmt_x);
}
// Permute the output vector from the sorted point ordering inside H2Pack
// to the original point ordering (inverse of H2P_permute_vector_forward).
// Input:  h2pack : initialized H2Pack structure (provides krnl_mat_size and bwd_pmt_idx)
//         x      : length krnl_mat_size, vector in the internal sorted ordering
// Output: pmt_x  : length krnl_mat_size, vector in the original ordering
void H2P_permute_vector_backward(H2Pack_p h2pack, const DTYPE *x, DTYPE *pmt_x)
{
    gather_vector_elements(sizeof(DTYPE), h2pack->krnl_mat_size, h2pack->bwd_pmt_idx, x, pmt_x);
}
// H2 representation multiplies a column vector: y := A * x, where A is the
// kernel matrix compressed in h2pack.
// Input:  h2pack : constructed H2 representation
//         x      : length krnl_mat_size, multiplicand in the ORIGINAL point ordering
// Output: y      : length krnl_mat_size, product in the ORIGINAL point ordering
// The vector is permuted into the internal sorted ordering, pushed through the
// forward / intermediate / backward / dense stages, reduced over per-thread
// partial results, and permuted back.
void H2P_matvec(H2Pack_p h2pack, const DTYPE *x, DTYPE *y)
{
    double st, et;
    int krnl_mat_size = h2pack->krnl_mat_size;
    int n_thread = h2pack->n_thread;
    int BD_JIT = h2pack->BD_JIT;
    int krnl_dim = h2pack->krnl_dim;
    int n_point = h2pack->n_point;
    // JIT bi-matvec kernels with krnl_dim > 1 operate on transposed
    // (dimension-major) vectors, requiring extra transpose steps below
    int need_trans = ((h2pack->krnl_bimv != NULL) && (BD_JIT == 1) && (krnl_dim > 1));
    DTYPE *xT = h2pack->xT;
    DTYPE *yT = h2pack->yT;
    DTYPE *pmt_x = h2pack->pmt_x;
    DTYPE *pmt_y = h2pack->pmt_y;
    double *timers = h2pack->timers;
    size_t *mat_size = h2pack->mat_size;
    H2P_thread_buf_p *thread_buf = h2pack->tb;
    // Working input/output views: transposed buffers when needed, else the
    // permuted vectors directly
    DTYPE *x_ = need_trans ? xT : pmt_x;
    DTYPE *y_ = need_trans ? yT : pmt_y;

    // 1. Forward permute the input vector
    st = get_wtime_sec();
    H2P_permute_vector_forward(h2pack, x, pmt_x);
    et = get_wtime_sec();
    timers[MV_VOP_TIMER_IDX] += et - st;
    mat_size[MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;

    // 2. Reset partial y result in each thread-local buffer to 0
    st = get_wtime_sec();
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        DTYPE *tid_y = thread_buf[tid]->y;
        memset(tid_y, 0, sizeof(DTYPE) * krnl_mat_size);
        #pragma omp for
        for (int i = 0; i < krnl_mat_size; i++)
        {
            pmt_y[i] = 0;
            yT[i] = 0;
        }
    }
    mat_size[MV_VOP_SIZE_IDX] += (2 + n_thread) * krnl_mat_size;
    if (need_trans)
    {
        // Build the dimension-major copy of the input for the JIT kernels
        H2P_transpose_dmat(n_thread, n_point, krnl_dim, pmt_x, krnl_dim, xT, n_point);
        mat_size[MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;
    }
    et = get_wtime_sec();
    timers[MV_VOP_TIMER_IDX] += et - st;

    // 3. Forward transformation, calculate U_j^T * x_j
    st = get_wtime_sec();
    H2P_matvec_fwd_transform(h2pack, pmt_x);
    et = get_wtime_sec();
    timers[MV_FWD_TIMER_IDX] += et - st;

    // 4. Intermediate multiplication, calculate B_{ij} * (U_j^T * x_j)
    st = get_wtime_sec();
    if (BD_JIT == 1)
    {
        if (need_trans) H2P_transpose_y0_from_krnldim(h2pack);
        H2P_matvec_intmd_mult_JIT(h2pack, x_);
        if (need_trans) H2P_transpose_y1_to_krnldim(h2pack);
    } else {
        H2P_matvec_intmd_mult_AOT(h2pack, pmt_x);
    }
    et = get_wtime_sec();
    timers[MV_MID_TIMER_IDX] += et - st;

    // 5. Backward transformation, calculate U_i * (B_{ij} * (U_j^T * x_j))
    st = get_wtime_sec();
    H2P_matvec_bwd_transform(h2pack, pmt_x, pmt_y);
    et = get_wtime_sec();
    timers[MV_BWD_TIMER_IDX] += et - st;

    // 6. Dense multiplication, calculate D_i * x_i
    st = get_wtime_sec();
    if (BD_JIT == 1)
    {
        H2P_matvec_dense_mult_JIT(h2pack, x_);
    } else {
        H2P_matvec_dense_mult_AOT(h2pack, pmt_x);
    }
    et = get_wtime_sec();
    timers[MV_DEN_TIMER_IDX] += et - st;

    // 7. Reduce sum partial y results: each thread owns a contiguous slice
    // [blk_spos, blk_spos + blk_len) of y_ and accumulates every thread's
    // partial result into it.
    st = get_wtime_sec();
    #pragma omp parallel num_threads(n_thread)
    {
        int tid = omp_get_thread_num();
        int blk_spos, blk_len;
        calc_block_spos_len(krnl_mat_size, n_thread, tid, &blk_spos, &blk_len);
        // Renamed from `tid` to `src_tid`: the original inner loop shadowed
        // the OpenMP thread id above, which was confusing (same behavior).
        for (int src_tid = 0; src_tid < n_thread; src_tid++)
        {
            DTYPE *y_src = thread_buf[src_tid]->y;
            #pragma omp simd
            for (int i = blk_spos; i < blk_spos + blk_len; i++) y_[i] += y_src[i];
        }
    }
    mat_size[MV_VOP_SIZE_IDX] += (2 * n_thread + 1) * krnl_mat_size;
    // We use xT here to hold the transpose of yT
    if (need_trans)
    {
        H2P_transpose_dmat(n_thread, krnl_dim, n_point, yT, n_point, xT, krnl_dim);
        #pragma omp parallel for simd
        for (int i = 0; i < krnl_mat_size; i++) pmt_y[i] += xT[i];
        mat_size[MV_VOP_SIZE_IDX] += 4 * krnl_mat_size;
    }
    et = get_wtime_sec();
    timers[MV_VOP_TIMER_IDX] += et - st;

    // 8. Backward permute the output vector
    st = get_wtime_sec();
    H2P_permute_vector_backward(h2pack, pmt_y, y);
    et = get_wtime_sec();
    timers[MV_VOP_TIMER_IDX] += et - st;
    //mat_size[_MV_VOP_SIZE_IDX] += 2 * krnl_mat_size;

    h2pack->n_matvec++;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * NOTE: *y is normalized in place (its fields may be modified) so that its
 * microsecond field brackets x's before the subtraction; callers must not
 * rely on y being preserved.
 *
 * Returns 1 if the difference is negative (x earlier than y), otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer usecs. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec  += borrow;
    }
    /* Carry excess microseconds in the other direction. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }

    /* After normalization tv_usec is certainly non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* Order-1, 3D 7-point stencil benchmark driver.
 * argv[1..3]: interior grid size Nx Ny Nz (halo of 1 added on each side),
 * argv[4]: number of time steps. Reasonable defaults are used when the
 * arguments are absent (the original read uninitialized values).
 * Runs the serial stencil TESTS times and reports the minimum wall time.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Defaults keep the benchmark runnable without arguments; +2 adds halos. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[2][Nz][Ny][Nx]: two time planes, alternated via t%2 / (t+1)%2. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  /* Initialize EVERY cell of both time planes, including the halo layers:
   * the stencil reads indices 0 and N-1 and, from t=1 on, the halos of the
   * second plane, so starting these loops at 1 (as before) read
   * uninitialized memory. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Was `min(...)`: no such identifier exists, the macro is MIN. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
|
packedstream_inl.h | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <nvbio/basic/cached_iterator.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
namespace nvbio {
// Primary template for the bit-packing helpers; intentionally empty.
// Only the specializations below (selected on SYMBOL_SIZE and the stream's
// storage word type: uint8 / uint32 / uint64 / uint4) are usable.
template <bool BIG_ENDIAN_T, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType, typename ValueType>
struct packer {
};
// Generic specialization: SYMBOL_SIZE-bit symbols packed into 32-bit storage
// words. When SYMBOL_SIZE is not a power of two a symbol may straddle a word
// boundary, which both accessors handle with a second word access.
template <bool BIG_ENDIAN_T, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,SYMBOL_SIZE,Symbol,InputStream,IndexType,uint32>
{
    // Read the symbol at index sym_idx from the packed stream.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
        const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;
        typedef typename unsigned_type<IndexType>::type index_type;
        // absolute bit position of the symbol, and its containing 32-bit word
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 5u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            // power-of-two sizes divide 32: the symbol lies in a single word
            const uint32 word = stream[ word_idx ];
            const uint32 symbol_offset = BIG_ENDIAN_T ? (32u - SYMBOL_SIZE - uint32(bit_idx & 31u)) : uint32(bit_idx & 31u);
            const uint32 symbol = (word >> symbol_offset) & SYMBOL_MASK;
            return Symbol( symbol );
        }
        else
        {
            // symbol may straddle two words: take the low part from word1...
            const uint32 word1 = stream[ word_idx ];
            const uint32 symbol_offset = uint32(bit_idx & 31u);
            const uint32 symbol1 = (word1 >> symbol_offset) & SYMBOL_MASK;
            // check if we need to read a second word
            const uint32 read_bits = nvbio::min( 32u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                // ...and the remaining high bits from the next word
                const uint32 rem_mask = (1u << rem_bits) - 1u;
                const uint32 word2 = stream[ word_idx+1 ];
                const uint32 symbol2 = word2 & rem_mask;
                return Symbol( symbol1 | (symbol2 << read_bits) );
            }
            else
                return Symbol( symbol1 );
        }
    }
    // Write symbol sym at index sym_idx (read-modify-write of 1 or 2 words).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
        const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 5u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            uint32 word = stream[ word_idx ];
            const uint32 symbol_offset = BIG_ENDIAN_T ? (32u - SYMBOL_SIZE - uint32(bit_idx & 31u)) : uint32(bit_idx & 31u);
            const uint32 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word &= ~(SYMBOL_MASK << symbol_offset);
            // set bits
            stream[ word_idx ] = word | symbol;
        }
        else
        {
            // low part of the symbol goes into word1
            uint32 word1 = stream[ word_idx ];
            const uint32 symbol_offset = uint32(bit_idx & 31u);
            const uint32 symbol1 = uint32(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word1 &= ~(SYMBOL_MASK << symbol_offset);
            // set bits
            stream[ word_idx ] = word1 | symbol1;
            // check if we need to write a second word
            const uint32 read_bits = nvbio::min( 32u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                // high part of the symbol goes into the next word
                const uint32 rem_mask = (1u << rem_bits) - 1u;
                uint32 word2 = stream[ word_idx+1 ];
                const uint32 symbol2 = uint32(sym & SYMBOL_MASK) >> read_bits;
                // clear all bits
                word2 &= ~rem_mask;
                // set bits
                stream[ word_idx+1 ] = word2 | symbol2;
            }
        }
    }
};
// Generic specialization: SYMBOL_SIZE-bit symbols packed into 64-bit storage
// words; same structure as the uint32 case but with 64-bit word arithmetic.
template <bool BIG_ENDIAN_T, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,SYMBOL_SIZE,Symbol,InputStream,IndexType,uint64>
{
    // Read the symbol at index sym_idx from the packed stream.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
        const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;
        typedef typename unsigned_type<IndexType>::type index_type;
        // absolute bit position of the symbol, and its containing 64-bit word
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 6u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            // power-of-two sizes divide 64: the symbol lies in a single word
            const uint64 word = stream[ word_idx ];
            const uint32 symbol_offset = BIG_ENDIAN_T ? (64u - SYMBOL_SIZE - uint32(bit_idx & 63u)) : uint32(bit_idx & 63u);
            const uint32 symbol = uint32((word >> symbol_offset) & SYMBOL_MASK);
            return Symbol( symbol );
        }
        else
        {
            // symbol may straddle two words: take the low part from word1...
            const uint64 word1 = stream[ word_idx ];
            const uint32 symbol_offset = uint32(bit_idx & 63u);
            const uint32 symbol1 = uint32((word1 >> symbol_offset) & SYMBOL_MASK);
            // check if we need to read a second word
            const uint32 read_bits = nvbio::min( 64u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                // ...and the remaining high bits from the next word
                const uint64 rem_mask = (uint64(1u) << rem_bits) - 1u;
                const uint64 word2 = stream[ word_idx+1 ];
                const uint32 symbol2 = uint32(word2 & rem_mask);
                return Symbol( symbol1 | (symbol2 << read_bits) );
            }
            else
                return Symbol( symbol1 );
        }
    }
    // Write symbol sym at index sym_idx (read-modify-write of 1 or 2 words).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
        const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 6u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            uint64 word = stream[ word_idx ];
            const uint32 symbol_offset = BIG_ENDIAN_T ? (64u - SYMBOL_SIZE - uint32(bit_idx & 63u)) : uint32(bit_idx & 63u);
            // NOTE: uint64 casts keep the shifts (up to 63 bits) well-defined
            const uint64 symbol = uint64(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word &= ~(uint64(SYMBOL_MASK) << symbol_offset);
            // set bits
            stream[ word_idx ] = word | symbol;
        }
        else
        {
            // low part of the symbol goes into word1
            uint64 word1 = stream[ word_idx ];
            const uint32 symbol_offset = uint32(bit_idx & 63);
            const uint64 symbol1 = uint64(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word1 &= ~(uint64(SYMBOL_MASK) << symbol_offset);
            // set bits
            stream[ word_idx ] = word1 | symbol1;
            // check if we need to write a second word
            const uint32 read_bits = nvbio::min( 64u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                // high part of the symbol goes into the next word
                const uint64 rem_mask = (uint64(1u) << rem_bits) - 1u;
                uint64 word2 = stream[ word_idx+1 ];
                const uint64 symbol2 = uint64(sym & SYMBOL_MASK) >> read_bits;
                // clear all bits
                word2 &= ~rem_mask;
                // set bits
                stream[ word_idx+1 ] = word2 | symbol2;
            }
        }
    }
};
// Generic specialization: SYMBOL_SIZE-bit symbols packed into 8-bit storage
// words; same structure as the uint32 case but with byte-sized words.
template <bool BIG_ENDIAN_T, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,SYMBOL_SIZE,Symbol,InputStream,IndexType,uint8>
{
    // Read the symbol at index sym_idx from the packed stream.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint8 SYMBOL_COUNT = uint8(1u) << SYMBOL_SIZE;
        const uint8 SYMBOL_MASK = SYMBOL_COUNT - uint8(1u);
        typedef typename unsigned_type<IndexType>::type index_type;
        // absolute bit position of the symbol, and its containing byte
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 3u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            // power-of-two sizes divide 8: the symbol lies in a single byte
            const uint8 word = stream[ word_idx ];
            const uint8 symbol_offset = BIG_ENDIAN_T ? (8u - SYMBOL_SIZE - uint8(bit_idx & 7u)) : uint8(bit_idx & 7u);
            const uint8 symbol = (word >> symbol_offset) & SYMBOL_MASK;
            return Symbol( symbol );
        }
        else
        {
            // symbol may straddle two bytes: take the low part from word1...
            const uint8 word1 = stream[ word_idx ];
            const uint8 symbol_offset = uint8(bit_idx & 7u);
            const uint8 symbol1 = (word1 >> symbol_offset) & SYMBOL_MASK;
            // check if we need to read a second word
            const uint32 read_bits = nvbio::min( 8u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                // ...and the remaining high bits from the next byte
                const uint8 rem_mask = uint8((1u << rem_bits) - 1u);
                const uint8 word2 = stream[ word_idx+1 ];
                const uint8 symbol2 = word2 & rem_mask;
                return Symbol( symbol1 | (symbol2 << read_bits) );
            }
            else
                return Symbol( symbol1 );
        }
    }
    // Write symbol sym at index sym_idx (read-modify-write of 1 or 2 bytes).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint8 SYMBOL_COUNT = uint8(1u) << SYMBOL_SIZE;
        const uint8 SYMBOL_MASK = SYMBOL_COUNT - uint8(1u);
        typedef typename unsigned_type<IndexType>::type index_type;
        const uint64 bit_idx = uint64(sym_idx) * SYMBOL_SIZE;
        const index_type word_idx = index_type( bit_idx >> 3u );
        if (is_pow2<SYMBOL_SIZE>())
        {
            uint8 word = stream[ word_idx ];
            const uint8 symbol_offset = BIG_ENDIAN_T ? (8u - SYMBOL_SIZE - uint8(bit_idx & 7u)) : uint8(bit_idx & 7u);
            const uint8 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word &= ~(SYMBOL_MASK << symbol_offset);
            // set bits
            stream[ word_idx ] = word | symbol;
        }
        else
        {
            // low part of the symbol goes into word1
            uint8 word1 = stream[ word_idx ];
            const uint8 symbol_offset = uint8(bit_idx & 7u);
            const uint8 symbol1 = uint8(sym & SYMBOL_MASK) << symbol_offset;
            // clear all bits
            word1 &= ~(SYMBOL_MASK << symbol_offset);
            // set bits
            stream[ word_idx ] = word1 | symbol1;
            // check if we need to write a second word
            const uint32 read_bits = nvbio::min( 8u - symbol_offset, SYMBOL_SIZE );
            const uint32 rem_bits = SYMBOL_SIZE - read_bits;
            if (rem_bits)
            {
                // high part of the symbol goes into the next byte
                uint8 word2 = stream[ word_idx+1 ];
                const uint8 symbol2 = uint32(sym & SYMBOL_MASK) >> read_bits;
                const uint8 rem_mask = uint8((1u << rem_bits) - 1u);
                // clear all bits
                word2 &= ~rem_mask;
                // set bits
                stream[ word_idx+1 ] = word2 | symbol2;
            }
        }
    }
};
// Fast-path specialization: 2-bit symbols in 32-bit words (16 symbols/word);
// a symbol can never straddle a word boundary here.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,2u,Symbol,InputStream,IndexType,uint32>
{
    // Read the symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        // 16 two-bit symbols per word
        const index_type word_pos = sym_idx >> 4u;
        const uint32     w        = stream[ word_pos ];
        const uint32     shift    = BIG_ENDIAN_T ? (30u - (uint32(sym_idx & 15u) << 1)) : uint32((sym_idx & 15u) << 1);
        return Symbol( (w >> shift) & SYMBOL_MASK );
    }
    // Write symbol sym at index sym_idx (read-modify-write of one word).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_pos = sym_idx >> 4u;
        const uint32     shift    = BIG_ENDIAN_T ? (30u - (uint32(sym_idx & 15u) << 1)) : uint32((sym_idx & 15u) << 1);
        const uint32     sym_bits = uint32(sym & SYMBOL_MASK) << shift;
        // clear the slot, then or in the new bits
        uint32 w = stream[ word_pos ];
        w &= ~(SYMBOL_MASK << shift);
        stream[ word_pos ] = w | sym_bits;
    }
};
// Fast-path specialization: 4-bit symbols in 32-bit words (8 symbols/word);
// a symbol can never straddle a word boundary here.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,4u,Symbol,InputStream,IndexType,uint32>
{
    // Read the symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        // 8 four-bit symbols per word
        const index_type word_pos = sym_idx >> 3u;
        const uint32     w        = stream[ word_pos ];
        const uint32     shift    = BIG_ENDIAN_T ? (28u - (uint32(sym_idx & 7u) << 2)) : uint32((sym_idx & 7u) << 2);
        return Symbol( (w >> shift) & SYMBOL_MASK );
    }
    // Write symbol sym at index sym_idx (read-modify-write of one word).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_pos = sym_idx >> 3u;
        const uint32     shift    = BIG_ENDIAN_T ? (28u - (uint32(sym_idx & 7u) << 2)) : uint32((sym_idx & 7u) << 2);
        const uint32     sym_bits = uint32(sym & SYMBOL_MASK) << shift;
        // clear the slot, then or in the new bits
        uint32 w = stream[ word_pos ];
        w &= ~(SYMBOL_MASK << shift);
        stream[ word_pos ] = w | sym_bits;
    }
};
// Fast-path specialization: 2-bit symbols in uint4 (128-bit) words,
// 64 symbols per word; comp()/select() pick the 32-bit component.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,2u,Symbol,InputStream,IndexType,uint4>
{
    // Read the symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 6u;
        const uint4 word = stream[ word_idx ];
        // which 32-bit component of the uint4, then the bit offset within it
        const uint32 symbol_comp = (sym_idx & 63u) >> 4u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (30u - (uint32(sym_idx & 15u) << 1)) : uint32((sym_idx & 15u) << 1);
        const uint32 symbol = (comp( word, symbol_comp ) >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Write symbol sym at index sym_idx (read-modify-write of one uint4).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 6u;
        uint4 word = stream[ word_idx ];
        const uint32 symbol_comp = (sym_idx & 63u) >> 4u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (30u - (uint32(sym_idx & 15u) << 1)) : uint32((sym_idx & 15u) << 1);
        const uint32 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits
        select( word, symbol_comp ) &= ~(SYMBOL_MASK << symbol_offset);
        select( word, symbol_comp ) |= symbol;
        // set bits
        stream[ word_idx ] = word;
    }
};
// Fast-path specialization: 4-bit symbols in uint4 (128-bit) words,
// 32 symbols per word; comp()/select() pick the 32-bit component.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,4u,Symbol,InputStream,IndexType,uint4>
{
    // Read the symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 5u;
        const uint4 word = stream[ word_idx ];
        // which 32-bit component of the uint4, then the bit offset within it
        const uint32 symbol_comp = (sym_idx & 31u) >> 3u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (28u - (uint32(sym_idx & 7u) << 2)) : uint32((sym_idx & 7u) << 2);
        const uint32 symbol = (comp( word, symbol_comp ) >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Write symbol sym at index sym_idx (read-modify-write of one uint4).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 5u;
        uint4 word = stream[ word_idx ];
        const uint32 symbol_comp = (sym_idx & 31u) >> 3u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (28u - (uint32(sym_idx & 7u) << 2)) : uint32((sym_idx & 7u) << 2);
        const uint32 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits
        select( word, symbol_comp ) &= ~(SYMBOL_MASK << symbol_offset);
        select( word, symbol_comp ) |= symbol;
        // set bits
        stream[ word_idx ] = word;
    }
};
// Fast-path specialization: 8-bit symbols in uint4 (128-bit) words,
// 16 symbols per word; comp()/select() pick the 32-bit component.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,8u,Symbol,InputStream,IndexType,uint4>
{
    // Read the symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 255u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 4u;
        const uint4 word = stream[ word_idx ];
        // which 32-bit component of the uint4, then the bit offset within it
        const uint32 symbol_comp = (sym_idx & 15u) >> 2u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (24u - (uint32(sym_idx & 3u) << 3)) : uint32((sym_idx & 3u) << 3);
        const uint32 symbol = (comp( word, symbol_comp ) >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Write symbol sym at index sym_idx (read-modify-write of one uint4).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 255u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 4u;
        uint4 word = stream[ word_idx ];
        const uint32 symbol_comp = (sym_idx & 15u) >> 2u;
        const uint32 symbol_offset = BIG_ENDIAN_T ? (24u - (uint32(sym_idx & 3u) << 3)) : uint32((sym_idx & 3u) << 3);
        const uint32 symbol = uint32(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits
        select( word, symbol_comp ) &= ~(SYMBOL_MASK << symbol_offset);
        select( word, symbol_comp ) |= symbol;
        // set bits
        stream[ word_idx ] = word;
    }
};
// Fast-path specialization: 2-bit symbols in 64-bit words (32 symbols/word);
// a symbol can never straddle a word boundary here.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,2u,Symbol,InputStream,IndexType,uint64>
{
    // Read the symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 5u;
        const uint64 word = stream[ word_idx ];
        const uint32 symbol_offset = BIG_ENDIAN_T ? (62u - (uint32(sym_idx & 31u) << 1)) : uint32((sym_idx & 31u) << 1);
        const uint64 symbol = (word >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Write symbol sym at index sym_idx (read-modify-write of one word).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 3u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 5u;
        uint64 word = stream[ word_idx ];
        const uint32 symbol_offset = BIG_ENDIAN_T ? (62u - (uint32(sym_idx & 31u) << 1)) : uint32((sym_idx & 31u) << 1);
        // uint64 casts keep the shifts (up to 62 bits) well-defined
        const uint64 symbol = uint64(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits
        word &= ~(uint64(SYMBOL_MASK) << symbol_offset);
        // set bits
        stream[ word_idx ] = word | symbol;
    }
};
// Fast-path specialization: 4-bit symbols in 64-bit words (16 symbols/word);
// a symbol can never straddle a word boundary here.
// FIXES vs the previous version:
//  * word_idx used `sym_idx >> 5u` (32 symbols/word — the 2-bit layout) while
//    the offset math (`sym_idx & 15u`, shift by 4 bits each) assumes 16
//    symbols per word; the correct word index is `sym_idx >> 4u`, matching
//    the pattern of every other specialization.
//  * set_symbol shifted 32-bit operands by up to 60 bits (undefined
//    behavior); cast to uint64 first, as the other uint64 specializations do.
template <bool BIG_ENDIAN_T, typename Symbol, typename InputStream, typename IndexType>
struct packer<BIG_ENDIAN_T,4u,Symbol,InputStream,IndexType,uint64>
{
    // Read the symbol at index sym_idx.
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol get_symbol(InputStream stream, const IndexType sym_idx)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        // 16 four-bit symbols per 64-bit word
        const index_type word_idx = sym_idx >> 4u;
        const uint64 word = stream[ word_idx ];
        const uint32 symbol_offset = BIG_ENDIAN_T ? (60u - (uint32(sym_idx & 15u) << 2)) : uint32((sym_idx & 15u) << 2);
        const uint64 symbol = (word >> symbol_offset) & SYMBOL_MASK;
        return Symbol( symbol );
    }
    // Write symbol sym at index sym_idx (read-modify-write of one word).
    static NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void set_symbol(InputStream stream, const IndexType sym_idx, Symbol sym)
    {
        const uint32 SYMBOL_MASK = 15u;
        typedef typename unsigned_type<IndexType>::type index_type;
        const index_type word_idx = sym_idx >> 4u;
        uint64 word = stream[ word_idx ];
        const uint32 symbol_offset = BIG_ENDIAN_T ? (60u - (uint32(sym_idx & 15u) << 2)) : uint32((sym_idx & 15u) << 2);
        // uint64 casts keep the shifts (up to 60 bits) well-defined
        const uint64 symbol = uint64(sym & SYMBOL_MASK) << symbol_offset;
        // clear all bits
        word &= ~(uint64(SYMBOL_MASK) << symbol_offset);
        // set bits
        stream[ word_idx ] = word | symbol;
    }
};
// Read the symbol at position sym_idx (relative to this stream's base offset
// m_index), dispatching to the packer specialization for this storage type.
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE Symbol PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::get(const index_type sym_idx) const
{
    return packer<BIG_ENDIAN_T, SYMBOL_SIZE,Symbol,InputStream,IndexType,storage_type>::get_symbol( m_stream, sym_idx + m_index );
}
// Write symbol sym at position sym_idx (relative to this stream's base offset
// m_index), dispatching to the packer specialization for this storage type.
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE void PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::set(const index_type sym_idx, const Symbol sym)
{
    return packer<BIG_ENDIAN_T, SYMBOL_SIZE,Symbol,InputStream,IndexType,storage_type>::set_symbol( m_stream, sym_idx + m_index, sym );
}
// pre-increment operator: advance the stream by one symbol, return *this
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator++ ()
{
    ++m_index;
    return *this;
}
// post-increment operator: advance the stream by one symbol, return a copy
// positioned at the pre-increment index
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator++ (int dummy)
{
    This r( m_stream, m_index );
    ++m_index;
    return r;
}
// pre-decrement operator: move the stream back one symbol, return *this
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-- ()
{
    --m_index;
    return *this;
}
// post-decrement operator: move the stream back one symbol, return a copy
// positioned at the pre-decrement index
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-- (int dummy)
{
    This r( m_stream, m_index );
    --m_index;
    return r;
}
// add offset: advance the stream by `distance` symbols in place
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator+= (const sindex_type distance)
{
    m_index += distance;
    return *this;
}
// subtract offset: move the stream back by `distance` symbols in place
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-= (const sindex_type distance)
{
    m_index -= distance;
    return *this;
}
// add offset: return a new stream positioned `distance` symbols forward
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator+ (const sindex_type distance) const
{
    return This( m_stream, m_index + distance );
}
// subtract offset: return a new stream positioned `distance` symbols back
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator- (const sindex_type distance) const
{
    return This( m_stream, m_index - distance );
}
// difference: signed distance between two iterators, in symbols
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
typename PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::sindex_type
PackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator- (const PackedStream other) const
{
    const sindex_type delta = sindex_type( m_index - other.m_index );
    return delta;
}
// assignment from another packed reference: copy the referenced symbol
//
template <typename Stream>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE PackedStreamRef<Stream>& PackedStreamRef<Stream>::operator= (const PackedStreamRef& ref)
{
    const Symbol s = Symbol( ref ); // decode the source symbol
    return *this = s;               // re-encode through the Symbol assignment below
}
// assignment operator
//
// Writes the symbol through the underlying stream, making the reference
// behave like an lvalue of type Symbol.
template <typename Stream>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE PackedStreamRef<Stream>& PackedStreamRef<Stream>::operator= (const Symbol s)
{
    m_stream.set( s );
    return *this;
}
// conversion operator
//
// Decodes and returns the referenced symbol from the underlying stream.
template <typename Stream>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE PackedStreamRef<Stream>::operator Symbol() const
{
    return m_stream.get();
}
/// ordering: an iterator precedes another when its symbol index is smaller
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator< (
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    return lhs.index() < rhs.index();
}
/// ordering: an iterator follows another when its symbol index is larger
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator> (
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    return lhs.index() > rhs.index();
}
/// equality: same underlying storage AND same symbol position
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator== (
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    if (lhs.stream() == rhs.stream())
        return lhs.index() == rhs.index();
    return false;
}
/// inequality: different storage OR different symbol position
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator!= (
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    if (lhs.stream() != rhs.stream())
        return true;
    return lhs.index() != rhs.index();
}
// forward_packer: helper policy implementing the word-caching logic of
// ForwardPackedStream for a generic integral storage word 'ValueType'.
// It keeps it.m_word (the cached storage word), it.m_word_index and
// it.m_word_offset (a *bit* offset within the word) in sync with it.m_index.
template <bool BIG_ENDIAN, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType, typename ValueType>
struct forward_packer
{
    typedef ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE,BIG_ENDIAN,IndexType> forward_stream_type;

    static const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;             // number of distinct symbols
    static const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;              // mask extracting one symbol
    static const uint32 WORD_SIZE = 8u * uint32( sizeof(ValueType) ); // storage word size, in bits
    static const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE;   // symbols packed per storage word

    // recompute the cached word, word index and bit offset after an arbitrary
    // change of it.m_index
    // (the '& (SYMBOLS_PER_WORD-1)' mask assumes SYMBOLS_PER_WORD is a power
    // of two — TODO confirm for non-power-of-two symbol sizes)
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static void rebase(forward_stream_type& it)
    {
        const uint32 symbol_idx = it.m_index & (SYMBOLS_PER_WORD-1);
        it.m_word_index = it.m_index / SYMBOLS_PER_WORD;
        // big-endian layouts place the first symbol in the most-significant bits
        it.m_word_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - symbol_idx * SYMBOL_SIZE) : symbol_idx * SYMBOL_SIZE;
        it.m_word = it.m_stream[ it.m_word_index ];
    }

    // advance by one symbol, loading the next storage word when the current
    // one is exhausted
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static void next(forward_stream_type& it)
    {
        it.m_index++;
        if (BIG_ENDIAN)
        {
            if (it.m_word_offset > 0)
                it.m_word_offset -= SYMBOL_SIZE;
            else
            {
                // need a new word
                ++it.m_word_index;
                it.m_word = it.m_stream[ it.m_word_index ];
                it.m_word_offset = WORD_SIZE - SYMBOL_SIZE;
            }
        }
        else
        {
            if (it.m_word_offset < WORD_SIZE - SYMBOL_SIZE)
                it.m_word_offset += SYMBOL_SIZE;
            else
            {
                // need a new word
                ++it.m_word_index;
                it.m_word = it.m_stream[ it.m_word_index ];
                it.m_word_offset = 0;
            }
        }
    }

    // step back one symbol, loading the previous storage word when needed
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static void prev(forward_stream_type& it)
    {
        it.m_index--;
        if (BIG_ENDIAN)
        {
            if (it.m_word_offset < WORD_SIZE - SYMBOL_SIZE)
                it.m_word_offset += SYMBOL_SIZE;
            else
            {
                // need a new word
                --it.m_word_index;
                it.m_word = it.m_stream[ it.m_word_index ];
                it.m_word_offset = 0u;
            }
        }
        else
        {
            if (it.m_word_offset > 0)
                it.m_word_offset -= SYMBOL_SIZE;
            else
            {
                // need a new word
                --it.m_word_index;
                it.m_word = it.m_stream[ it.m_word_index ];
                it.m_word_offset = WORD_SIZE - SYMBOL_SIZE;
            }
        }
    }

    // decode the symbol at the cached position (no memory access)
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static Symbol fetch(const forward_stream_type& it)
    {
        return Symbol( (it.m_word >> it.m_word_offset) & SYMBOL_MASK );
    }
};
// forward_packer specialization for uint4 (128-bit) storage words.
// Unlike the generic version, m_word_offset here caches the *symbol* index
// within the current 128-bit word rather than a bit offset; fetch() first
// selects the 32-bit component, then extracts the symbol from it.
template <bool BIG_ENDIAN, uint32 SYMBOL_SIZE, typename Symbol, typename InputStream, typename IndexType>
struct forward_packer<BIG_ENDIAN, SYMBOL_SIZE, Symbol, InputStream, IndexType, uint4>
{
    typedef ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE,BIG_ENDIAN,IndexType> forward_stream_type;

    static const uint32 SYMBOL_COUNT        = 1u << SYMBOL_SIZE;
    static const uint32 SYMBOL_MASK         = SYMBOL_COUNT - 1u;
    static const uint32 WORD_SIZE           = 128;
    static const uint32 SUBWORD_SIZE        = 32;
    static const uint32 SYMBOLS_PER_WORD    = WORD_SIZE / SYMBOL_SIZE;
    static const uint32 SYMBOLS_PER_SUBWORD = SUBWORD_SIZE / SYMBOL_SIZE;

    // recompute the cached word, word index and symbol offset after an
    // arbitrary change of it.m_index
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static void rebase(forward_stream_type& it)
    {
        // (removed an unused 'symbol_idx' local that merely duplicated
        //  the m_word_offset computation below)
        it.m_word_index  = it.m_index / SYMBOLS_PER_WORD;
        it.m_word_offset = it.m_index & (SYMBOLS_PER_WORD-1);
        it.m_word        = it.m_stream[ it.m_word_index ];
    }

    // advance by one symbol, fetching a new 128-bit word when needed
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static void next(forward_stream_type& it)
    {
        it.m_index++;
        if (it.m_word_offset < SYMBOLS_PER_WORD-1)
            it.m_word_offset++;
        else
        {
            // need a new word
            ++it.m_word_index;
            it.m_word = it.m_stream[ it.m_word_index ];
            it.m_word_offset = 0;
        }
    }

    // step back one symbol, fetching the previous word when needed
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static void prev(forward_stream_type& it)
    {
        it.m_index--;
        if (it.m_word_offset > 0)
            it.m_word_offset--;
        else
        {
            // need a new word
            --it.m_word_index;
            it.m_word = it.m_stream[ it.m_word_index ];
            it.m_word_offset = SYMBOLS_PER_WORD - 1u;
        }
    }

    // decode the symbol at the cached position
    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    static Symbol fetch(const forward_stream_type& it)
    {
        // select the 32-bit component holding the symbol...
        const uint32 word_comp = comp( it.m_word, it.m_word_offset / SYMBOLS_PER_SUBWORD );
        // ...then locate the symbol inside that component
        const uint32 word_mod    = it.m_word_offset & (SYMBOLS_PER_SUBWORD-1);
        const uint32 word_offset = BIG_ENDIAN ? (SUBWORD_SIZE - SYMBOL_SIZE - word_mod * SYMBOL_SIZE) :
                                                (word_mod * SYMBOL_SIZE);
        return Symbol( (word_comp >> word_offset) & SYMBOL_MASK );
    }
};
// dereference: decode the symbol at the current position
//
// NOTE: made consistent with rebase()/operator++ below, which spell the
// packer's symbol-size argument as the template parameter SYMBOL_SIZE_T
// rather than the class-scope SYMBOL_SIZE constant (same value).
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
Symbol ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::get() const
{
    return forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type>::fetch( *this );
}
// rebase the iterator
//
// Re-derives the cached word, word index and intra-word offset from the
// current symbol index; required after any non-incremental index change.
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::rebase(void)
{
    forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type>::rebase(*this);
}
// pre-increment operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator++ ()
{
    // delegate the word-caching bookkeeping to the packer helper
    typedef forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type> packer_type;
    packer_type::next( *this );
    return *this;
}
// post-increment operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator++ (int)
{
    // snapshot the current position before advancing
    const This snapshot( m_stream, m_index );
    typedef forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type> packer_type;
    packer_type::next( *this );
    return snapshot;
}
// pre-decrement operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-- ()
{
    // delegate the word-caching bookkeeping to the packer helper
    typedef forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type> packer_type;
    packer_type::prev( *this );
    return *this;
}
// post-decrement operator
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-- (int)
{
    // snapshot the current position before stepping back
    const This snapshot( m_stream, m_index );
    typedef forward_packer<BIG_ENDIAN_T,SYMBOL_SIZE_T,Symbol,InputStream,IndexType,storage_type> packer_type;
    packer_type::prev( *this );
    return snapshot;
}
// add offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator+= (const sindex_type distance)
{
    m_index = m_index + distance;
    rebase(); // resync the cached word after the jump
    return *this;
}
// subtract offset
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>&
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator-= (const sindex_type distance)
{
    m_index = m_index - distance;
    rebase(); // resync the cached word after the jump
    return *this;
}
// add offset: build a new iterator 'distance' symbols ahead
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator+ (const sindex_type distance) const
{
    This ret( m_stream, m_index + distance );
    return ret;
}
// subtract offset: build a new iterator 'distance' symbols behind
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator- (const sindex_type distance) const
{
    This ret( m_stream, m_index - distance );
    return ret;
}
// difference: signed distance between two iterators, in symbols
//
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
typename ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::sindex_type
ForwardPackedStream<InputStream,Symbol, SYMBOL_SIZE_T, BIG_ENDIAN_T, IndexType>::operator- (const ForwardPackedStream other) const
{
    const sindex_type delta = sindex_type( m_index - other.m_index );
    return delta;
}
/// ordering: an iterator precedes another when its symbol index is smaller
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator< (
    const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    return lhs.index() < rhs.index();
}
/// ordering: an iterator follows another when its symbol index is larger
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator> (
    const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    return lhs.index() > rhs.index();
}
/// equality: same underlying storage AND same symbol position
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator== (
    const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    if (lhs.stream() == rhs.stream())
        return lhs.index() == rhs.index();
    return false;
}
/// inequality: different storage OR different symbol position
///
template <typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool operator!= (
    const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& lhs,
    const ForwardPackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType>& rhs)
{
    if (lhs.stream() != rhs.stream())
        return true;
    return lhs.index() != rhs.index();
}
namespace priv {

// assign a sequence to a packed stream
//
// Encodes input_len symbols from input_string into packed_string, starting
// at packed_string's current offset. The (possibly partial) first storage
// word is filled with a read-modify-write; every following word is rebuilt
// from scratch and overwritten wholesale, so the OpenMP path below can
// process one whole word per iteration without synchronization.
//
// \param input_len      number of symbols to encode
// \param input_string   input symbol sequence (indexable iterator)
// \param packed_string  destination packed stream, positioned at the start
//
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_HOST_DEVICE
void serial_assign(
    const IndexType input_len,
    InputIterator input_string,
    PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
    typedef PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_stream_type;
    typedef typename packed_stream_type::storage_type word_type;

    const uint32 WORD_SIZE = uint32( 8u * sizeof(word_type) );
    const bool BIG_ENDIAN = BIG_ENDIAN_T;
    const uint32 SYMBOL_SIZE = SYMBOL_SIZE_T;
    const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE;
    const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
    const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;

    InputStream words = packed_string.stream();

    const IndexType stream_offset = packed_string.index();
    const uint32 word_offset = stream_offset & (SYMBOLS_PER_WORD-1);
    uint32 word_rem = 0;

    if (word_offset)
    {
        // compute how many symbols we still need to encode to fill the current word
        word_rem = SYMBOLS_PER_WORD - word_offset;

        // FIX: clamp the head loop so we never read input_string past the end
        // of the input when input_len is smaller than the word remainder
        const uint32 n_head = (IndexType( word_rem ) < input_len) ? word_rem : uint32( input_len );

        // fetch the word in question
        word_type word = words[ stream_offset / SYMBOLS_PER_WORD ];

        // loop through the word's bp's
        for (uint32 i = 0; i < n_head; ++i)
        {
            // fetch the bp
            const uint8 bp = input_string[i] & SYMBOL_MASK;

            const uint32 bit_idx = (word_offset + i) * SYMBOL_SIZE;
            const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
            const word_type symbol = word_type(bp) << symbol_offset;

            // clear all bits
            word &= ~(word_type(SYMBOL_MASK) << symbol_offset);

            // set bits
            word |= symbol;
        }

        // write out the word
        words[ stream_offset / SYMBOLS_PER_WORD ] = word;
    }

#if defined(_OPENMP) && !defined(NVBIO_DEVICE_COMPILATION)
    // we use this solution because the 'if' clause in the 'pragma omp for' results in 30% slowdown
    // when the if is not taken and the loop is executed serially
    if (input_len > 1000000)
    {
        #pragma omp parallel for
        for (int64 i = word_rem; i < int64( input_len ); i += SYMBOLS_PER_WORD)
        {
            // encode a word's worth of characters
            word_type word = 0u;

            const uint32 n_symbols = nvbio::min( SYMBOLS_PER_WORD, uint32( input_len - IndexType(i) ) );

            // loop through the word's bp's
            for (uint32 j = 0; j < SYMBOLS_PER_WORD; ++j)
            {
                if (j < n_symbols)
                {
                    // fetch the bp
                    const uint8 bp = input_string[IndexType(i) + j] & SYMBOL_MASK;

                    const uint32 bit_idx = j * SYMBOL_SIZE;
                    const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
                    const word_type symbol = word_type(bp) << symbol_offset;

                    // set bits
                    word |= symbol;
                }
            }

            // write out the word (fully rebuilt, so no clearing is needed)
            const uint32 word_idx = uint32( (stream_offset + IndexType(i)) / SYMBOLS_PER_WORD );
            words[ word_idx ] = word;
        }
    }
    else
#endif
    {
        for (IndexType i = word_rem; i < input_len; i += SYMBOLS_PER_WORD)
        {
            // encode a word's worth of characters
            word_type word = 0u;

            const uint32 n_symbols = nvbio::min( SYMBOLS_PER_WORD, uint32( input_len - IndexType(i) ) );

            // get the offset to the first symbol
            uint32 symbol_offset = BIG_ENDIAN ? WORD_SIZE - SYMBOL_SIZE : 0u;

            // loop through the word's bp's
            for (uint32 j = 0; j < SYMBOLS_PER_WORD; ++j)
            {
                if (j < n_symbols)
                {
                    // fetch the bp
                    const uint8 bp = input_string[IndexType(i) + j] & SYMBOL_MASK;

                    const word_type symbol = word_type(bp) << symbol_offset;

                    // set bits
                    word |= symbol;

                    // move the offset to the next symbol's position
                    if (BIG_ENDIAN) symbol_offset -= SYMBOL_SIZE;
                    else            symbol_offset += SYMBOL_SIZE;
                }
            }

            // write out the word (fully rebuilt, so no clearing is needed)
            const uint32 word_idx = uint32( (stream_offset + IndexType(i)) / SYMBOLS_PER_WORD );
            words[ word_idx ] = word;
        }
    }
}

} // namespace priv
#if defined(__CUDACC__)

namespace priv {

// CUDA kernel: assign a sequence to a packed stream.
// Thread 0 read-modify-writes the (possibly partial) first storage word;
// every other thread rebuilds exactly one of the following words from scratch.
//
// NOTE(review): thread 0 reads input_string[0..word_rem) without clamping to
// input_len — presumably callers guarantee input_len >= word_rem whenever the
// stream offset is unaligned; confirm against device_assign's callers.
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
__global__
void assign_kernel(
    const IndexType input_len,
    const InputIterator input_string,
    PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
    typedef PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_stream_type;
    typedef typename packed_stream_type::storage_type word_type;

    const uint32 WORD_SIZE = uint32( 8u * sizeof(word_type) );
    const bool BIG_ENDIAN = BIG_ENDIAN_T;
    const uint32 SYMBOL_SIZE = SYMBOL_SIZE_T;
    const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE;
    const uint32 SYMBOL_COUNT = 1u << SYMBOL_SIZE;
    const uint32 SYMBOL_MASK = SYMBOL_COUNT - 1u;

    const IndexType stream_offset = packed_string.index(); // stream offset, in symbols
    const uint32 word_offset = stream_offset & (SYMBOLS_PER_WORD-1); // offset within the first word
    const uint32 word_rem = SYMBOLS_PER_WORD - word_offset; // # of remaining symbols to fill the first word

    InputStream words = packed_string.stream();

    const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (thread_id == 0)
    {
        // patch the head word in place, keeping the bits before word_offset
        // fetch the word in question
        word_type word = words[ stream_offset / SYMBOLS_PER_WORD ];
        // loop through the word's bp's
        for (uint32 i = 0; i < word_rem; ++i)
        {
            // fetch the bp
            const uint8 bp = input_string[i] & SYMBOL_MASK;
            const uint32 bit_idx = (word_offset + i) * SYMBOL_SIZE;
            const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
            const word_type symbol = word_type(bp) << symbol_offset;
            // clear all bits
            // (the uint64 cast is harmless: &= truncates the mask back to word_type)
            word &= ~(uint64(SYMBOL_MASK) << symbol_offset);
            // set bits
            word |= symbol;
        }
        // write out the word
        words[ stream_offset / SYMBOLS_PER_WORD ] = word;
    }
    else
    {
        // check whether this thread should do something
        if (word_rem + (thread_id - 1u) * SYMBOLS_PER_WORD >= input_len)
            return;

        // index of the first input symbol handled by this thread
        const uint32 i = word_rem + (thread_id - 1u) * SYMBOLS_PER_WORD;

        // encode a word's worth of characters
        word_type word = 0u;
        const uint32 n_symbols = nvbio::min( SYMBOLS_PER_WORD, uint32( input_len - IndexType(i) ) );
        // loop through the word's bp's
        for (uint32 j = 0; j < SYMBOLS_PER_WORD; ++j)
        {
            if (j < n_symbols)
            {
                // fetch the bp
                const uint8 bp = input_string[IndexType(i) + j] & SYMBOL_MASK;
                const uint32 bit_idx = j * SYMBOL_SIZE;
                const uint32 symbol_offset = BIG_ENDIAN ? (WORD_SIZE - SYMBOL_SIZE - bit_idx) : bit_idx;
                const word_type symbol = word_type(bp) << symbol_offset;
                // set bits
                word |= symbol;
            }
        }
        // write out the word (fully rebuilt, so no clearing is needed)
        const uint32 word_idx = uint32( (stream_offset + IndexType(i)) / SYMBOLS_PER_WORD );
        words[ word_idx ] = word;
    }
}
// assign a sequence to a packed stream
// NOTE: this is a host ONLY function - marking it as host/device would cause compiler misbehaviours
//
// Launch geometry: one thread per storage word overlapped by the output
// range [stream_offset, stream_offset + input_len) — thread 0 patches the
// (possibly partial) head word, threads 1..n_words-1 write the rest.
//
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
void device_assign(
    const IndexType input_len,
    const InputIterator input_string,
    PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
    // nothing to encode on an empty input
    if (input_len == 0)
        return;

    typedef PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_stream_type;
    typedef typename packed_stream_type::storage_type word_type;

    const uint32 WORD_SIZE = uint32( 8u * sizeof(word_type) );
    const uint32 SYMBOL_SIZE = SYMBOL_SIZE_T;
    const uint32 SYMBOLS_PER_WORD = WORD_SIZE / SYMBOL_SIZE;

    const IndexType stream_offset = packed_string.index(); // stream offset, in symbols
    const uint32 word_begin = util::divide_rz( stream_offset, SYMBOLS_PER_WORD );             // first touched word
    const uint32 word_end = util::divide_ri( stream_offset + input_len, SYMBOLS_PER_WORD );   // one past the last
    const uint32 n_words = word_end - word_begin;                                             // words to write

    const uint32 blockdim = 128u;
    const uint32 n_blocks = util::divide_ri( n_words, blockdim );

    priv::assign_kernel<<<n_blocks,blockdim>>>( input_len, input_string, packed_string );

    // surface any launch/runtime failure immediately
    cuda::check_error("assign_kernel()");
}
} // namespace priv
// assign a sequence to a packed stream (device_tag overload)
//
// Dispatches on where the call is compiled: host-side callers spawn a CUDA
// kernel, while device-side callers run the serial implementation inline.
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_HOST_DEVICE
void assign(
    const device_tag tag,
    const IndexType input_len,
    const InputIterator input_string,
    PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
#if !defined(NVBIO_DEVICE_COMPILATION)
    //
    // this function is being called on the host: spawn a kernel
    //
    priv::device_assign( input_len, input_string, packed_string );
#else
    //
    // this function is being called on the device: call the serial implementation
    //
    priv::serial_assign( input_len, input_string, packed_string );
#endif
}
#endif // defined(__CUDACC__)
// assign a sequence to a packed stream (host_tag overload)
//
// Runs on the CPU; serial_assign may still use OpenMP internally for large
// inputs when _OPENMP is enabled.
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_HOST_DEVICE
void assign(
    const host_tag tag,
    const IndexType input_len,
    const InputIterator input_string,
    PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
    priv::serial_assign( input_len, input_string, packed_string );
}
// assign a sequence to a packed stream
//
// Convenience overload: infers whether the packed stream's storage lives on
// the host or the device from its underlying word iterator type, then
// forwards to the corresponding tagged overload.
template <typename InputIterator, typename InputStream, typename Symbol, uint32 SYMBOL_SIZE_T, bool BIG_ENDIAN_T, typename IndexType>
NVBIO_HOST_DEVICE
void assign(
    const IndexType input_len,
    const InputIterator input_string,
    PackedStream<InputStream,Symbol,SYMBOL_SIZE_T,BIG_ENDIAN_T,IndexType> packed_string)
{
    // find the system tag of the output packed stream
    typedef typename iterator_system<InputStream>::type system_tag;

    // and chose which function to call based on it
    assign( system_tag(), input_len, input_string, packed_string );
}
//
// A utility function to transpose a set of packed input streams:
// the symbols of the i-th input stream are supposed to be stored contiguously in the range [offset(i), offset + N(i)]
// the *words* of the i-th output stream will be stored in strided fashion at out_stream[tid, tid + ((N(i)+symbols_per_word-1)/symbols_per_word) * stride]
//
// \param stride output stride
// \param N length of this thread's string in the input stream
// \param in_offset offset of this thread's string in the input stream
// \param in_stream input stream
// \param out_stream output stream
//
// NOTE(review): the BLOCKDIM template parameter is unused in this body —
// presumably kept for interface compatibility with callers; confirm.
template <uint32 BLOCKDIM, uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename InStreamIterator, typename OutStreamIterator>
NVBIO_HOST_DEVICE
void transpose_packed_streams(const uint32 stride, const uint32 N, const uint32 in_offset, const InStreamIterator in_stream, OutStreamIterator out_stream)
{
    typedef typename std::iterator_traits<InStreamIterator>::value_type word_type;

    const uint32 SYMBOLS_PER_WORD = (sizeof(word_type)*8) / SYMBOL_SIZE;

    // position (in symbols) of the string's first symbol within its first input word
    uint32 word_offset = in_offset & (SYMBOLS_PER_WORD-1);
    uint32 begin_word = in_offset / SYMBOLS_PER_WORD;
    uint32 end_word = (in_offset + N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD;

    // write out the output symbols
    const uint32 N_words = (N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD;

    // each output word is stitched together from (up to) two adjacent input
    // words, shifting by word_offset symbols to re-align the string to offset 0
    word_type cur_word = in_stream[begin_word+0];
    for (uint32 w = 0; w < N_words; ++w)
    {
        if (BIG_ENDIAN == false)
        {
            // fill the first part of the output word
            word_type out_word = cur_word >> (word_offset*SYMBOL_SIZE);

            // fetch the next word (zero once past the end)
            cur_word = begin_word+w+1 < end_word ? in_stream[begin_word+w+1] : 0u;

            // fill the second part of the output word
            if (word_offset)
                out_word |= cur_word << ((SYMBOLS_PER_WORD - word_offset)*SYMBOL_SIZE);

            out_stream[ stride*w ] = out_word;
        }
        else
        {
            // same as the little-endian branch with the shift directions mirrored
            // fill the first part of the output word
            word_type out_word = cur_word << (word_offset*SYMBOL_SIZE);

            // fetch the next word (zero once past the end)
            cur_word = begin_word+w+1 < end_word ? in_stream[begin_word+w+1] : 0u;

            // fill the second part of the output word
            if (word_offset)
                out_word |= cur_word >> ((SYMBOLS_PER_WORD - word_offset)*SYMBOL_SIZE);

            out_stream[ stride*w ] = out_word;
        }
    }
}
} // namespace nvbio
|
GB_binop__isgt_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_int8)
// A*D function (colscale): GB (_AxD__isgt_int8)
// D*A function (rowscale): GB (_DxB__isgt_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_int8)
// C=scalar+B GB (_bind1st__isgt_int8)
// C=scalar+B' GB (_bind1st_tran__isgt_int8)
// C=A+scalar GB (_bind2nd__isgt_int8)
// C=A'+scalar GB (_bind2nd_tran__isgt_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
// the following macros are consumed by the GB_*_template.c files #include'd
// below; together they define the operator z = (x > y) (ISGT) on int8_t
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// NOTE: the trailing backslash above splices this comment line into the
// macro; comments are stripped afterwards, so the macro still expands to 0.
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// NOTE: as above, the spliced comment is stripped and the macro expands to 0.
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// the C(i,j) entry at position p of the value array Cx
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags are compile-time controls from GB_control.h)
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT8 || GxB_NO_ISGT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGT is not in that list, so this variant is excluded by the generator
// and no function is emitted (hence the (none) placeholder name).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// cij = (aij > bij) for all positions; the loop itself lives in the included
// template, with nthreads controlling its parallelism.
void GB (_Cdense_ewise3_noaccum__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// cij = (cij > bij) for each entry of B, using the B_ek_slicing task
// decomposition. Returns GrB_NO_VALUE when this operator/type combination is
// disabled (GB_DISABLE), letting the caller fall back to the generic method.
GrB_Info GB (_Cdense_accumB__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// cij = (cij > b) for all entries of the dense matrix C.
GrB_Info GB (_Cdense_accumb__isgt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (kept as emitted by the
    // code generator, and keeps compilers happy about a missing return)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Scales each column j of A by D(j,j) under the ISGT op; the loop lives in
// the included template, driven by the A_ek_slicing task decomposition.
GrB_Info GB (_AxD__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is C's value array, written in place by the template
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Scales each row i of B by D(i,i) under the ISGT op; the loop lives in the
// included template.
GrB_Info GB (_DxB__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is C's value array, written in place by the template
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// eWiseAdd with the ISGT op: the result has entries wherever A or B does.
// For eWiseUnion, alpha/beta replace missing entries of A/B respectively;
// the two scalars are only read when is_eWiseUnion is true.
GrB_Info GB (_AaddB__isgt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace used (and allocated) by the included template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // typecast the alpha/beta scalars to int8_t
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isgt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the meta-template expands the full sparse/hyper eWiseMult kernel for
// this operator; all work (masking, tasks, threads) happens inside it
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_FLIPPED selects whether the template applies fmult(y,x) or
// fmult(x,y); which branch is compiled depends on GB_BINOP_FLIP
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// kernel for C<M> = A.*B with M sparse/hyper and A, B bitmap/full;
// the template provides the whole loop body for this operator
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult kernel where the result C is held in bitmap form;
// the template provides the full implementation for this operator
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isgt_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (x > B(p)) for every entry present in B, with the scalar x
    // bound as the first operand of the ISGT operator.
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t pB ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pB = 0 ; pB < bnz ; pB++)
    {
        // compute only the entries flagged present by Bb
        if (GBB (Bb, pB))
        {
            int8_t bval = GBX (Bx, pB, false) ;
            Cx [pB] = (x > bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isgt_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (A(p) > y) for every entry present in A, with the scalar y
    // bound as the second operand of the ISGT operator.
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t pA ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pA = 0 ; pA < anz ; pA++)
    {
        // compute only the entries flagged present by Ab
        if (GBB (Ab, pA))
        {
            int8_t aval = GBX (Ax, pA, false) ;
            Cx [pA] = (aval > y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bind the scalar x as the first operand; the template transposes A
// and applies GB_CAST_OP (defined above) to each entry
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE (identical definition; generated for symmetry)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bind the scalar y as the second operand; the template transposes A
// and applies GB_CAST_OP (defined above) to each entry
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residualbased_incrementalupdate_static_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_RESIDUALBASED_INCREMENTALUPDATE_STATIC_SCHEME_H )
#define KRATOS_RESIDUALBASED_INCREMENTALUPDATE_STATIC_SCHEME_H
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedIncrementalUpdateStaticScheme
* @ingroup KratosCore
* @brief This class provides the implementation of a static scheme
* @details The only operation done in this scheme is the update of the database, no predict is done
* @tparam TSparseSpace The sparse space considered
* @tparam TDenseSpace The dense space considered
* @see Scheme
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace //= DenseSpace<double>
>
class ResidualBasedIncrementalUpdateStaticScheme
: public Scheme<TSparseSpace,TDenseSpace>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ResidualBasedIncrementalUpdateStaticScheme
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedIncrementalUpdateStaticScheme);
/// Base class definition
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
/// DoF array type definition
typedef typename BaseType::DofsArrayType DofsArrayType;
/// Data type definition
typedef typename BaseType::TDataType TDataType;
/// Matrix type definition
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
/// Vector type definition
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// Local system vector type definition
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
/// Local system matrix type definition
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
/// Elements containers definition
typedef ModelPart::ElementsContainerType ElementsArrayType;
/// Conditions containers definition
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
/// The definition of the vector containing the equation ids
typedef Element::EquationIdVectorType EquationIdVectorType;
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor. The pseudo static scheme (parameters)
* @param ThisParameters Dummy parameters (only "name" is validated)
*/
explicit ResidualBasedIncrementalUpdateStaticScheme(Parameters ThisParameters)
: BaseType()
{
// Validate default parameters
Parameters default_parameters = Parameters(R"(
{
"name" : "ResidualBasedIncrementalUpdateStaticScheme"
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
}
/** Default constructor.
*/
explicit ResidualBasedIncrementalUpdateStaticScheme()
: BaseType()
{}
/** Copy Constructor.
*/
explicit ResidualBasedIncrementalUpdateStaticScheme(ResidualBasedIncrementalUpdateStaticScheme& rOther)
:BaseType(rOther)
{
}
/** Destructor.
*/
~ResidualBasedIncrementalUpdateStaticScheme() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution.
* @param rModelPart The model part of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
// Delegate the incremental DoF update to the sparse-space DoF updater
mpDofUpdater->UpdateDofs(rDofSet, rDx);
KRATOS_CATCH("")
}
/**
* @brief Performing the prediction of the solution.
* @param rModelPart The model part of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
// Intentionally empty: this static scheme performs no prediction,
// only the database update (see the class description)
KRATOS_CATCH("")
}
/**
* @brief It initializes a non-linear iteration (for the element)
* @param rModelPart The model of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void InitializeNonLinIteration(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Definition of the first element iterator
const auto it_elem_begin = rModelPart.ElementsBegin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
auto it_elem = it_elem_begin + i;
it_elem->InitializeNonLinearIteration(r_current_process_info);
}
// Definition of the first condition iterator
const auto it_cond_begin = rModelPart.ConditionsBegin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
auto it_cond = it_cond_begin + i;
it_cond->InitializeNonLinearIteration(r_current_process_info);
}
// Definition of the first constraint iterator
const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
auto it_const = it_const_begin + i;
it_const->InitializeNonLinearIteration(r_current_process_info);
}
KRATOS_CATCH( "" );
}
/**
* @brief It initializes a non-linear iteration (for an individual condition)
* @param rCurrentCondition The condition to compute
* @param rCurrentProcessInfo The current process info instance
*/
void InitializeNonLinearIteration(
Condition::Pointer rCurrentCondition,
ProcessInfo& rCurrentProcessInfo
) override
{
(rCurrentCondition)->InitializeNonLinearIteration(rCurrentProcessInfo);
}
/**
* @brief It initializes a non-linear iteration (for an individual element)
* @param pCurrentElement The element to compute
* @param rCurrentProcessInfo The current process info instance
*/
void InitializeNonLinearIteration(
Element::Pointer pCurrentElement,
ProcessInfo& rCurrentProcessInfo
) override
{
(pCurrentElement)->InitializeNonLinearIteration(rCurrentProcessInfo);
}
/**
* @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
* @details It "asks" the matrix needed to the element and performs the operations needed to introduce the selected time integration scheme. This function calculates at the same time the contribution to the LHS and to the RHS of the system
* @param pCurrentElement The element to compute
* @param rLHSContribution The LHS matrix contribution
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateSystemContributions(
Element::Pointer pCurrentElement,
LocalSystemMatrixType& rLHSContribution,
LocalSystemVectorType& rRHSContribution,
EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
(pCurrentElement)->CalculateLocalSystem(rLHSContribution,rRHSContribution, rCurrentProcessInfo);
(pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief Functions totally analogous to the precedent but applied to the "condition" objects
* @param rCurrentCondition The condition to compute
* @param rLHSContribution The LHS matrix contribution
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void Condition_CalculateSystemContributions(
Condition::Pointer rCurrentCondition,
LocalSystemMatrixType& rLHSContribution,
LocalSystemVectorType& rRHSContribution,
EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
(rCurrentCondition)->CalculateLocalSystem(rLHSContribution, rRHSContribution, rCurrentProcessInfo);
(rCurrentCondition)->EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief This function is designed to calculate just the RHS contribution
* @param pCurrentElement The element to compute
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void Calculate_RHS_Contribution(
Element::Pointer pCurrentElement,
LocalSystemVectorType& rRHSContribution,
EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
(pCurrentElement)->CalculateRightHandSide(rRHSContribution, rCurrentProcessInfo);
(pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief Functions totally analogous to the precedent but applied to the "condition" objects
* @param rCurrentCondition The condition to compute
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void Condition_Calculate_RHS_Contribution(
Condition::Pointer rCurrentCondition,
LocalSystemVectorType& rRHSContribution,
EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
(rCurrentCondition)->CalculateRightHandSide(rRHSContribution, rCurrentProcessInfo);
(rCurrentCondition)->EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief This function is designed to calculate just the LHS contribution
* @param pCurrentElement The element to compute
* @param rLHSContribution The LHS matrix contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void Calculate_LHS_Contribution(
Element::Pointer pCurrentElement,
LocalSystemMatrixType& rLHSContribution,
EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
(pCurrentElement)->CalculateLeftHandSide(rLHSContribution, rCurrentProcessInfo);
(pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief Liberate internal storage.
*/
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedIncrementalUpdateStaticScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); /// The DoF updater, which will update the values
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class ResidualBasedIncrementalUpdateStaticScheme
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_INCREMENTALUPDATE_STATIC_SCHEME_H defined */
|
GB_AxB_dot3_phase1_template.c | //------------------------------------------------------------------------------
// GB_AxB_dot3_phase1_template: analysis phase for dot3; C<M> = A'*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
// sets Cwork [pM] to the estimated work for each entry C(i,j) of the
// dot3 product C<M> = A'*B, in parallel over the pre-sliced tasks
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast == -1) ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
}
int64_t bpleft = 0 ; // Ch is not jumbled
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of C and M
//------------------------------------------------------------------
#if defined ( GB_MASK_SPARSE_AND_STRUCTURAL )
// M and C are sparse
const int64_t j = k ;
#else
// M and C are either both sparse or both hypersparse
const int64_t j = GBH (Ch, k) ;
#endif
// locate the entries pM:pM_end of the kth vector of M for this task
GB_GET_VECTOR (pM, pM_end, pM, pM_end, Mp, k, mvlen) ;
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
#if GB_B_IS_HYPER
// B is hyper
int64_t pB_start, pB_end ;
GB_lookup (true, Bh, Bp, vlen, &bpleft, bnvec-1, j,
&pB_start, &pB_end) ;
#elif GB_B_IS_SPARSE
// B is sparse
const int64_t pB_start = Bp [j] ;
const int64_t pB_end = Bp [j+1] ;
#else
// B is bitmap or full
const int64_t pB_start = j * vlen ;
const int64_t pB_end = (j+1) * vlen ;
#endif
const int64_t bjnz = pB_end - pB_start ;
//------------------------------------------------------------------
// estimate the work to compute each entry of C(:,j)
//------------------------------------------------------------------
// A decent estimate of the work to compute the dot product C(i,j)
// = A(:,i)'*B(:,j) is min (|A(:,i)|, |B(:,j)|) + 1. This is a
// lower bound. The actual work could require a binary search of
// either A(:,i) or B(:,j), or a merge of the two vectors. Or it
// could require no work at all if all entries in A(:,i) appear
// before all entries in B(:,j), or visa versa. No work is done if
// M(i,j)=0.
if (bjnz == 0)
{
// B(:,j) is empty, so C(:,j) is empty as well. No work is to
// be done, but it still takes unit work to flag each C(:,j) as
// a zombie
for ( ; pM < pM_end ; pM++)
{
Cwork [pM] = 1 ;
}
}
else
{
for ( ; pM < pM_end ; pM++)
{
// each entry costs at least 1 unit of bookkeeping work
int64_t work = 1 ;
#if !defined ( GB_MASK_SPARSE_AND_STRUCTURAL )
// if M is structural, no need to check its values
if (GB_mcast (Mx, pM, msize))
#endif
{
const int64_t i = Mi [pM] ;
#if GB_A_IS_HYPER
// A is hyper
int64_t pA, pA_end ;
int64_t apleft = 0 ; // M might be jumbled
GB_lookup (true, Ah, Ap, vlen, &apleft, anvec-1, i,
&pA, &pA_end) ;
const int64_t ainz = pA_end - pA ;
work += GB_IMIN (ainz, bjnz) ;
#elif GB_A_IS_SPARSE
// A is sparse
const int64_t pA = Ap [i] ;
const int64_t pA_end = Ap [i+1] ;
const int64_t ainz = pA_end - pA ;
work += GB_IMIN (ainz, bjnz) ;
#else
// A is bitmap or full
work += bjnz ;
#endif
}
Cwork [pM] = work ;
}
}
}
}
}
|
Data.h |
/*****************************************************************************
*
* Copyright (c) 2003-2018 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/
/** \file Data.h */
#ifndef __ESCRIPT_DATA_H__
#define __ESCRIPT_DATA_H__
#include "system_dep.h"
#include "DataAbstract.h"
#include "DataException.h"
#include "DataTypes.h"
#include "EsysMPI.h"
#include "FunctionSpace.h"
#include "DataVectorOps.h"
#include <algorithm>
#include <string>
#include <sstream>
#include <boost/python/object.hpp>
#include <boost/python/tuple.hpp>
#include <boost/math/special_functions/bessel.hpp>
#ifndef ESCRIPT_MAX_DATA_RANK
#define ESCRIPT_MAX_DATA_RANK 4
#endif
namespace escript {
//
// Forward declaration for various implementations of Data.
class DataConstant;
class DataTagged;
class DataExpanded;
class DataLazy;
/**
\brief
Data represents a collection of datapoints.
Description:
Internally, the datapoints are actually stored by a DataAbstract object.
The specific instance of DataAbstract used may vary over the lifetime
of the Data object.
Some methods on this class return references (eg getShape()).
These references should not be used after an operation which changes the underlying DataAbstract object.
Doing so will lead to invalid memory access.
This should not affect any methods exposed via boost::python.
*/
class Data {
public:
/**
Constructors.
*/
/**
\brief
Default constructor.
Creates a DataEmpty object.
*/
Data();
/**
\brief
Copy constructor.
WARNING: Only performs a shallow copy.
*/
Data(const Data& inData);
/**
\brief
Constructor from another Data object. If "what" is different from the
function space of inData the inData are tried to be interpolated to what,
otherwise a shallow copy of inData is returned.
*/
Data(const Data& inData,
const FunctionSpace& what);
/**
\brief Copy Data from an existing vector
*/
Data(const DataTypes::RealVectorType& value,
const DataTypes::ShapeType& shape,
const FunctionSpace& what,
bool expanded);
/**
\brief
Constructor which creates a Data with points having the specified shape.
\param value - Input - Single real value applied to all Data.
\param dataPointShape - Input - The shape of each data point.
\param what - Input - A description of what this data represents.
\param expanded - Input - Flag, if true fill the entire container with
the given value. Otherwise a more efficient storage
mechanism will be used.
*/
Data(DataTypes::real_t value,
const DataTypes::ShapeType& dataPointShape,
const FunctionSpace& what,
bool expanded);
/**
\brief
Constructor which creates a Data with points having the specified shape.
\param value - Input - Single complex value applied to all Data.
\param dataPointShape - Input - The shape of each data point.
\param what - Input - A description of what this data represents.
\param expanded - Input - Flag, if true fill the entire container with
the given value. Otherwise a more efficient storage
mechanism will be used.
*/
explicit
Data(DataTypes::cplx_t value,
const DataTypes::ShapeType& dataPointShape,
const FunctionSpace& what,
bool expanded);
/**
\brief
Constructor which performs a deep copy of a region from another Data object.
\param inData - Input - Input Data object.
\param region - Input - Region to copy.
*/
Data(const Data& inData,
const DataTypes::RegionType& region);
/**
\brief
Constructor which copies data from a wrapped array.
\param w - Input - Input data.
\param what - Input - A description of what this data represents.
\param expanded - Input - Flag, if true fill the entire container with
the value. Otherwise a more efficient storage
mechanism will be used.
*/
Data(const WrappedArray& w, const FunctionSpace& what,
bool expanded);
/**
\brief
Constructor which creates a DataConstant.
Copies data from any object that can be treated like a python array/sequence.
All other parameters are copied from other.
\param value - Input - Input data.
\param other - Input - contains all other parameters.
*/
Data(const boost::python::object& value,
const Data& other);
/**
This constructor subsumes a number of previous python ones.
Data(const boost::python::object& value,
const FunctionSpace& what=FunctionSpace(),
bool expanded=false);
Data(DataTypes::real_t value,
const boost::python::tuple& shape=boost::python::make_tuple(),
const FunctionSpace& what=FunctionSpace(),
bool expanded=false);
and a new
Data(cplx_t value,
const boost::python::tuple& shape=boost::python::make_tuple(),
const FunctionSpace& what=FunctionSpace(),
bool expanded=false);
*/
Data(boost::python::object value,
boost::python::object par1=boost::python::object(),
boost::python::object par2=boost::python::object(),
boost::python::object par3=boost::python::object());
/**
\brief Create a Data using an existing DataAbstract. Warning: The new object assumes ownership of the pointer!
Once you have passed the pointer, do not delete it.
*/
explicit Data(DataAbstract* underlyingdata);
/**
\brief Create a Data based on the supplied DataAbstract
*/
explicit Data(DataAbstract_ptr underlyingdata);
/**
\brief
Destructor
*/
~Data();
/**
\brief Make this object a deep copy of "other".
*/
void
copy(const Data& other);
/**
\brief Return a pointer to a deep copy of this object.
*/
Data
copySelf() const;
/**
\brief produce a delayed evaluation version of this Data.
*/
Data
delay();
/**
\brief convert the current data into lazy data.
*/
void
delaySelf();
/**
Member access methods.
*/
/**
\brief
switches on update protection
*/
void
setProtection();
/**
\brief
Returns true, if the data object is protected against update
*/
bool
isProtected() const;
/**
\brief
Return the value of a data point as a python tuple.
*/
const boost::python::object
getValueOfDataPointAsTuple(int dataPointNo);
/**
\brief
sets the values of a data-point from a python object on this process
*/
void
setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
/**
\brief
sets the values of a data-point from a array-like object on this process
*/
void
setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
/**
\brief
sets the values of a data-point on this process
*/
void
setValueOfDataPoint(int dataPointNo, const DataTypes::real_t);
void
setValueOfDataPointC(int dataPointNo, const DataTypes::cplx_t);
/**
\brief Return a data point across all processors as a python tuple.
*/
const boost::python::object
getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
/**
\brief Set the value of a global data point
*/
void
setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
/**
\brief
Return the tag number associated with the given data-point.
*/
int
getTagNumber(int dpno);
/**
\brief
Write the data as a string. For large amounts of data, a summary is printed.
*/
std::string
toString() const;
/**
\brief
Whatever the current Data type make this into a DataExpanded.
*/
void
expand();
/**
\brief
If possible convert this Data to DataTagged. This will only allow
Constant data to be converted to tagged. An attempt to convert
Expanded data to tagged will throw an exception.
*/
void
tag();
/**
\brief If this data is lazy, then convert it to ready data.
What type of ready data depends on the expression. For example, Constant+Tagged==Tagged.
*/
void
resolve();
/**
\brief returns return true if data contains NaN.
\warning This is dependent on the ability to reliably detect NaNs on your compiler.
See the nancheck function in LocalOps for details.
*/
bool
hasNaN();
/**
\brief replaces all NaN values with value
*/
void
replaceNaN(DataTypes::real_t value);
/**
\brief replaces all NaN values with value
*/
void
replaceNaN(DataTypes::cplx_t value);
/**
\brief replaces all NaN values with value
*/
void
replaceNaNPython(boost::python::object obj);
bool
hasInf();
void
replaceInf(DataTypes::real_t value);
void
replaceInf(DataTypes::cplx_t value);
void
replaceInfPython(boost::python::object obj);
/**
\brief Ensures data is ready for write access.
This means that the data will be resolved if lazy and will be copied if shared with another Data object.
\warning This method should only be called in single threaded sections of code. (It modifies m_data).
Do not create any Data objects from this one between calling requireWrite and getSampleDataRW.
Doing so might introduce additional sharing.
*/
void
requireWrite();
/**
\brief
Return true if this Data is expanded.
\note To determine if a sample will contain separate values for each datapoint. Use actsExpanded instead.
*/
bool
isExpanded() const;
/**
\brief
Return true if this Data is expanded or resolves to expanded.
That is, if it has a separate value for each datapoint in the sample.
*/
bool
actsExpanded() const;
/**
\brief
Return true if this Data is tagged.
*/
bool
isTagged() const;
/**
\brief
Return true if this Data is constant.
*/
bool
isConstant() const;
/**
\brief Return true if this Data is lazy.
*/
bool
isLazy() const;
/**
\brief Return true if this data is ready.
*/
bool
isReady() const;
/**
\brief
Return true if this Data holds an instance of DataEmpty. This is _not_ the same as asking if the object
contains datapoints.
*/
bool
isEmpty() const;
/**
\brief
True if components of this data are stored as complex
*/
bool
isComplex() const;
/**
\brief
Return the function space.
*/
inline
const FunctionSpace&
getFunctionSpace() const
{
return m_data->getFunctionSpace();
}
/**
\brief
Return the domain.
*/
inline
// const AbstractDomain&
const_Domain_ptr
getDomain() const
{
return getFunctionSpace().getDomain();
}
/**
\brief
Return the domain.
TODO: For internal use only. This should be removed.
*/
inline
// const AbstractDomain&
Domain_ptr
getDomainPython() const
{
return getFunctionSpace().getDomainPython();
}
/**
\brief
Return the rank of the point data.
*/
inline
unsigned int
getDataPointRank() const
{
return m_data->getRank();
}
/**
\brief
Return the total number of data points, i.e. the number of samples
multiplied by the number of data points per sample.
*/
inline
int
getNumDataPoints() const
{
const int sampleCount = getNumSamples();
const int pointsPerSample = getNumDataPointsPerSample();
return sampleCount * pointsPerSample;
}
/**
\brief
Return the number of samples.
*/
inline
int
getNumSamples() const
{
return m_data->getNumSamples();
}
/**
\brief
Return the number of data points per sample.
*/
inline
int
getNumDataPointsPerSample() const
{
return m_data->getNumDPPSample();
}
/**
\brief
Returns true if the number of data points per sample and the number of
samples match the respective argument. DataEmpty always returns true.
*/
inline
bool numSamplesEqual(int numDataPointsPerSample, int numSamples) const
{
// Empty data matches any sample layout by convention.
if (isEmpty())
{
return true;
}
return numDataPointsPerSample==getNumDataPointsPerSample()
&& numSamples==getNumSamples();
}
/**
\brief
Returns true if the shape matches the vector (dimensions[0],...,
dimensions[rank-1]). DataEmpty always returns true.
*/
inline
bool isDataPointShapeEqual(int rank, const int* dimensions) const
{
if (isEmpty())
return true;
const DataTypes::ShapeType givenShape(&dimensions[0],&dimensions[rank]);
return (getDataPointShape()==givenShape);
}
/**
\brief
Return the number of values in the shape for this object.
*/
int
getNoValues() const
{
return m_data->getNoValues();
}
/**
\brief
dumps the object into a netCDF file
*/
void
dump(const std::string fileName) const;
/**
\brief returns the values of the object as a list of tuples (one for each datapoint).
\param scalarastuple If true, scalar data will produce single valued tuples [(1,) (2,) ...]
If false, the result is a list of scalars [1, 2, ...]
*/
const boost::python::object
toListOfTuples(bool scalarastuple=true);
/**
\brief
Return the sample data for the given sample no.
Please do not use this unless you NEED to access samples individually
\param sampleNo - Input - the given sample no.
\return pointer to the sample data.
*/
const DataTypes::real_t*
getSampleDataRO(DataTypes::RealVectorType::size_type sampleNo, DataTypes::real_t dummy=0) const;
const DataTypes::cplx_t*
getSampleDataRO(DataTypes::CplxVectorType::size_type sampleNo, DataTypes::cplx_t dummy) const;
/**
\brief
Return the sample data for the given sample no.
Please do not use this unless you NEED to access samples individually
\param sampleNo - Input - the given sample no.
\return pointer to the sample data.
*/
// Real-valued overload; the dummy parameter selects the scalar type.
DataTypes::real_t*
getSampleDataRW(DataTypes::RealVectorType::size_type sampleNo, DataTypes::real_t dummy=0);
// Complex-valued overload. size_type is taken from CplxVectorType so this
// declaration is consistent with the inline definition further down (which
// previously relied on the two size_type typedefs being identical).
DataTypes::cplx_t*
getSampleDataRW(DataTypes::CplxVectorType::size_type sampleNo, DataTypes::cplx_t dummy);
/**
\brief
Return a pointer to the beginning of the underlying data
\warning please avoid using this method since it by-passes possible lazy improvements. May be removed without notice.
\return pointer to the data.
*/
const DataTypes::real_t*
getDataRO(DataTypes::real_t dummy=0) const;
const DataTypes::cplx_t*
getDataRO(DataTypes::cplx_t dummy) const;
/**
\brief
Return the sample data for the given tag. If an attempt is made to
access data that isn't tagged an exception will be thrown.
\param tag - Input - the tag key.
*/
inline
DataTypes::real_t*
getSampleDataByTag(int tag, DataTypes::real_t dummy=0)
{
return m_data->getSampleDataByTag(tag, dummy);
}
inline
DataTypes::cplx_t*
getSampleDataByTag(int tag, DataTypes::cplx_t dummy)
{
return m_data->getSampleDataByTag(tag, dummy);
}
/**
\brief
Return a reference into the DataVector which points to the specified data point.
\param sampleNo - Input -
\param dataPointNo - Input -
*/
DataTypes::RealVectorType::const_reference
getDataPointRO(int sampleNo, int dataPointNo);
/**
\brief
Return a reference into the DataVector which points to the specified data point.
\param sampleNo - Input -
\param dataPointNo - Input -
*/
DataTypes::RealVectorType::reference
getDataPointRW(int sampleNo, int dataPointNo);
/**
\brief
Return the offset for the given sample and point within the sample
*/
inline
DataTypes::RealVectorType::size_type
getDataOffset(int sampleNo,
int dataPointNo)
{
return m_data->getPointOffset(sampleNo,dataPointNo);
}
/**
\brief
Return a reference to the data point shape.
*/
inline
const DataTypes::ShapeType&
getDataPointShape() const
{
return m_data->getShape();
}
/**
\brief
Return the data point shape as a tuple of integers.
*/
const boost::python::tuple
getShapeTuple() const;
/**
\brief
Returns the product of the data point shapes
*/
long
getShapeProduct() const;
/**
\brief
Return the size of the data point. It is the product of the
data point shape dimensions.
*/
int
getDataPointSize() const;
/**
\brief
Return the number of doubles stored for this Data.
*/
DataTypes::RealVectorType::size_type
getLength() const;
/**
\brief Return true if this object contains no samples.
This is not the same as isEmpty()
*/
bool
hasNoSamples() const
{
return m_data->getNumSamples()==0;
}
/**
\brief
Assign the given value to the tag assocciated with name. Implicitly converts this
object to type DataTagged. Throws an exception if this object
cannot be converted to a DataTagged object or name cannot be mapped onto a tag key.
\param name - Input - name of tag.
\param value - Input - Value to associate with given key.
*/
void
setTaggedValueByName(std::string name,
const boost::python::object& value);
/**
\brief
Assign the given value to the tag. Implicitly converts this
object to type DataTagged if it is constant.
\param tagKey - Input - Integer key.
\param value - Input - Value to associate with given key.
==>*
*/
void
setTaggedValue(int tagKey,
const boost::python::object& value);
/**
\brief
Assign the given value to the tag. Implicitly converts this
object to type DataTagged if it is constant.
\param tagKey - Input - Integer key.
\param pointshape - Input - The shape of the value parameter
\param value - Input - Value to associate with given key.
\param dataOffset - Input - Offset of the begining of the point within the value parameter
*/
void
setTaggedValueFromCPP(int tagKey,
const DataTypes::ShapeType& pointshape,
const DataTypes::RealVectorType& value,
int dataOffset=0);
void
setTaggedValueFromCPP(int tagKey,
const DataTypes::ShapeType& pointshape,
const DataTypes::CplxVectorType& value,
int dataOffset=0);
/**
\brief
Copy other Data object into this Data object where mask is positive.
*/
void
copyWithMask(const Data& other,
const Data& mask);
/**
Data object operation methods and operators.
*/
/**
\brief
set all values to zero
*
*/
void
setToZero();
/**
\brief
Interpolates this onto the given functionspace and returns
the result as a Data object.
*
*/
Data
interpolate(const FunctionSpace& functionspace) const;
Data
interpolateFromTable3D(const WrappedArray& table, DataTypes::real_t Amin, DataTypes::real_t Astep,
DataTypes::real_t undef, Data& B, DataTypes::real_t Bmin, DataTypes::real_t Bstep, Data& C,
DataTypes::real_t Cmin, DataTypes::real_t Cstep, bool check_boundaries);
Data
interpolateFromTable2D(const WrappedArray& table, DataTypes::real_t Amin, DataTypes::real_t Astep,
DataTypes::real_t undef, Data& B, DataTypes::real_t Bmin, DataTypes::real_t Bstep,bool check_boundaries);
Data
interpolateFromTable1D(const WrappedArray& table, DataTypes::real_t Amin, DataTypes::real_t Astep,
DataTypes::real_t undef,bool check_boundaries);
Data
interpolateFromTable3DP(boost::python::object table, DataTypes::real_t Amin, DataTypes::real_t Astep,
Data& B, DataTypes::real_t Bmin, DataTypes::real_t Bstep, Data& C, DataTypes::real_t Cmin, DataTypes::real_t Cstep, DataTypes::real_t undef,bool check_boundaries);
Data
interpolateFromTable2DP(boost::python::object table, DataTypes::real_t Amin, DataTypes::real_t Astep,
Data& B, DataTypes::real_t Bmin, DataTypes::real_t Bstep, DataTypes::real_t undef,bool check_boundaries);
Data
interpolateFromTable1DP(boost::python::object table, DataTypes::real_t Amin, DataTypes::real_t Astep,
DataTypes::real_t undef,bool check_boundaries);
Data
nonuniforminterp(boost::python::object in, boost::python::object out, bool check_boundaries);
Data
nonuniformslope(boost::python::object in, boost::python::object out, bool check_boundaries);
/**
\brief
Calculates the gradient of the data at the data points of functionspace.
If functionspace is not present the function space of Function(getDomain()) is used.
*
*/
Data
gradOn(const FunctionSpace& functionspace) const;
Data
grad() const;
/**
\brief
Calculate the integral over the function space domain as a python tuple.
*/
boost::python::object
integrateToTuple_const() const;
/**
\brief
Calculate the integral over the function space domain as a python tuple.
*/
boost::python::object
integrateToTuple();
/**
\brief
Returns 1./ Data object
*
*/
Data
oneOver() const;
/**
\brief
Return a Data with a 1 for +ive values and a 0 for 0 or -ive values.
*
*/
Data
wherePositive() const;
/**
\brief
Return a Data with a 1 for -ive values and a 0 for +ive or 0 values.
*
*/
Data
whereNegative() const;
/**
\brief
Return a Data with a 1 for +ive or 0 values and a 0 for -ive values.
*
*/
Data
whereNonNegative() const;
/**
\brief
Return a Data with a 1 for -ive or 0 values and a 0 for +ive values.
*
*/
Data
whereNonPositive() const;
/**
\brief
Return a Data with a 1 for 0 values and a 0 for +ive or -ive values.
*
*/
Data
whereZero(DataTypes::real_t tol=0.0) const;
/**
\brief
Return a Data with a 0 for 0 values and a 1 for +ive or -ive values.
*
*/
Data
whereNonZero(DataTypes::real_t tol=0.0) const;
/**
\brief
Return the maximum absolute value of this Data object.
The method is not const because lazy data needs to be expanded before Lsup can be computed.
The _const form can be used when the Data object is const, however this will only work for
Data which is not Lazy.
For Data which contain no samples (or tagged Data for which no tags in use have a value)
zero is returned.
*/
DataTypes::real_t
Lsup();
DataTypes::real_t
Lsup_const() const;
/**
\brief
Return the maximum value of this Data object.
The method is not const because lazy data needs to be expanded before sup can be computed.
The _const form can be used when the Data object is const, however this will only work for
Data which is not Lazy.
For Data which contain no samples (or tagged Data for which no tags in use have a value)
a large negative value is returned.
*/
DataTypes::real_t
sup();
DataTypes::real_t
sup_const() const;
/**
\brief
Return the minimum value of this Data object.
The method is not const because lazy data needs to be expanded before inf can be computed.
The _const form can be used when the Data object is const, however this will only work for
Data which is not Lazy.
For Data which contain no samples (or tagged Data for which no tags in use have a value)
a large positive value is returned.
*/
DataTypes::real_t
inf();
DataTypes::real_t
inf_const() const;
/**
\brief
Return the absolute value of each data point of this Data object.
*
*/
Data
abs() const;
/**
\brief
Return the phase/arg/angular-part of complex values.
*
*/
Data
phase() const;
/**
\brief
Return the maximum value of each data point of this Data object.
*
*/
Data
maxval() const;
/**
\brief
Return the minimum value of each data point of this Data object.
*
*/
Data
minval() const;
/**
\brief
Return the (sample number, data-point number) of the data point with
the minimum component value in this Data object.
\note If you are working in python, please consider using Locator
instead of manually manipulating process and point IDs.
*/
const boost::python::tuple
minGlobalDataPoint() const;
/**
\brief
Return the (sample number, data-point number) of the data point with
the minimum component value in this Data object.
\note If you are working in python, please consider using Locator
instead of manually manipulating process and point IDs.
*/
const boost::python::tuple
maxGlobalDataPoint() const;
/**
\brief
Return the sign of each data point of this Data object.
-1 for negative values, zero for zero values, 1 for positive values.
*
*/
Data
sign() const;
/**
\brief
Return the symmetric part of a matrix which is half the matrix plus its transpose.
*
*/
Data
symmetric() const;
/**
\brief
Return the antisymmetric part of a matrix which is half the matrix minus its transpose.
*
*/
Data
antisymmetric() const;
/**
\brief
Return the hermitian part of a matrix which is half the matrix plus its adjoint.
*
*/
Data
hermitian() const;
/**
\brief
Return the anti-hermitian part of a matrix which is half the matrix minus its hermitian.
*
*/
Data
antihermitian() const;
/**
\brief
Return the trace of a matrix
*
*/
Data
trace(int axis_offset) const;
/**
\brief
Transpose each data point of this Data object around the given axis.
*
*/
Data
transpose(int axis_offset) const;
/**
\brief
Return the eigenvalues of the symmetric part at each data point of this Data object in increasing values.
Currently this function is restricted to rank 2, square shape, and dimension 3.
*
*/
Data
eigenvalues() const;
/**
\brief
Return the eigenvalues and corresponding eigenvcetors of the symmetric part at each data point of this Data object.
the eigenvalues are ordered in increasing size where eigenvalues with relative difference less than
tol are treated as equal. The eigenvectors are orthogonal, normalized and the sclaed such that the
first non-zero entry is positive.
Currently this function is restricted to rank 2, square shape, and dimension 3
*
*/
const boost::python::tuple
eigenvalues_and_eigenvectors(const DataTypes::real_t tol=1.e-12) const;
/**
\brief
swaps the components axis0 and axis1
*
*/
Data
swapaxes(const int axis0, const int axis1) const;
/**
\brief
Return the error function erf of each data point of this Data object.
*
*/
Data
erf() const;
/**
\brief
For complex values return the conjugate values.
For non-complex data return a copy
*/
Data
conjugate() const;
Data
real() const;
Data
imag() const;
/**
\brief
Return the sin of each data point of this Data object.
*
*/
Data
sin() const;
/**
\brief
Return the cos of each data point of this Data object.
*
*/
Data
cos() const;
/**
\brief
Bessel worker function.
*
*/
Data
bessel(int order, DataTypes::real_t (*besselfunc) (int,DataTypes::real_t) );
/**
\brief
Return the Bessel function of the first kind for each data point of this Data object.
*
*/
Data
besselFirstKind(int order);
/**
\brief
Return the Bessel function of the second kind for each data point of this Data object.
*
*/
Data
besselSecondKind(int order);
/**
\brief
Return the tan of each data point of this Data object.
*
*/
Data
tan() const;
/**
\brief
Return the asin of each data point of this Data object.
*
*/
Data
asin() const;
/**
\brief
Return the acos of each data point of this Data object.
*
*/
Data
acos() const;
/**
\brief
Return the atan of each data point of this Data object.
*
*/
Data
atan() const;
/**
\brief
Return the sinh of each data point of this Data object.
*
*/
Data
sinh() const;
/**
\brief
Return the cosh of each data point of this Data object.
*
*/
Data
cosh() const;
/**
\brief
Return the tanh of each data point of this Data object.
*
*/
Data
tanh() const;
/**
\brief
Return the asinh of each data point of this Data object.
*
*/
Data
asinh() const;
/**
\brief
Return the acosh of each data point of this Data object.
*
*/
Data
acosh() const;
/**
\brief
Return the atanh of each data point of this Data object.
*
*/
Data
atanh() const;
/**
\brief
Return the log to base 10 of each data point of this Data object.
*
*/
Data
log10() const;
/**
\brief
Return the natural log of each data point of this Data object.
*
*/
Data
log() const;
/**
\brief
Return the exponential function of each data point of this Data object.
*
*/
Data
exp() const;
/**
\brief
Return the square root of each data point of this Data object.
*
*/
Data
sqrt() const;
/**
\brief
Return the negation of each data point of this Data object.
*
*/
Data
neg() const;
/**
\brief
Return the identity of each data point of this Data object.
Simply returns this object unmodified.
*
*/
Data
pos() const;
/**
\brief
Return the given power of each data point of this Data object.
\param right Input - the power to raise the object to.
*
*/
Data
powD(const Data& right) const;
/**
\brief
Return the given power of each data point of this boost python object.
\param right Input - the power to raise the object to.
*
*/
Data
powO(const boost::python::object& right) const;
/**
\brief
Return the given power of each data point of this boost python object.
\param left Input - the bases
*
*/
Data
rpowO(const boost::python::object& left) const;
/**
\brief
Overloaded operator +=
\param right - Input - The right hand side.
*
*/
Data& operator+=(const Data& right);
Data& operator+=(const boost::python::object& right);
Data& operator=(const Data& other);
/**
\brief
Overloaded operator -=
\param right - Input - The right hand side.
*
*/
Data& operator-=(const Data& right);
Data& operator-=(const boost::python::object& right);
/**
\brief
Overloaded operator *=
\param right - Input - The right hand side.
*
*/
Data& operator*=(const Data& right);
Data& operator*=(const boost::python::object& right);
/**
\brief
Overloaded operator /=
\param right - Input - The right hand side.
*
*/
Data& operator/=(const Data& right);
Data& operator/=(const boost::python::object& right);
/**
\brief
Newer style division operator for python
*/
Data truedivD(const Data& right);
/**
\brief
Newer style division operator for python
*/
Data truedivO(const boost::python::object& right);
/**
\brief
Newer style division operator for python
*/
Data rtruedivO(const boost::python::object& left);
/**
\brief
wrapper for python add operation
*/
boost::python::object __add__(const boost::python::object& right);
/**
\brief
wrapper for python subtract operation
*/
boost::python::object __sub__(const boost::python::object& right);
/**
\brief
wrapper for python reverse subtract operation
*/
boost::python::object __rsub__(const boost::python::object& right);
/**
\brief
wrapper for python multiply operation
*/
boost::python::object __mul__(const boost::python::object& right);
/**
\brief
wrapper for python divide operation
*/
boost::python::object __div__(const boost::python::object& right);
/**
\brief
wrapper for python reverse divide operation
*/
boost::python::object __rdiv__(const boost::python::object& right);
/**
\brief return inverse of matricies.
*/
Data
matrixInverse() const;
/**
\brief
Returns true if this can be interpolated to functionspace.
*/
bool
probeInterpolation(const FunctionSpace& functionspace) const;
/**
Data object slicing methods.
*/
/**
\brief
Returns a slice from this Data object.
/description
Implements the [] get operator in python.
Calls getSlice.
\param key - Input - python slice tuple specifying
slice to return.
*/
Data
getItem(const boost::python::object& key) const;
/**
\brief
Copies slice from value into this Data object.
Implements the [] set operator in python.
Calls setSlice.
\param key - Input - python slice tuple specifying
slice to copy from value.
\param value - Input - Data object to copy from.
*/
void
setItemD(const boost::python::object& key,
const Data& value);
void
setItemO(const boost::python::object& key,
const boost::python::object& value);
// These following public methods should be treated as private.
/**
\brief
Perform the given unary operation on every element of every data point in
this Data object.
*/
template <class UnaryFunction>
inline
void
unaryOp2(UnaryFunction operation);
/**
\brief
Return a Data object containing the specified slice of
this Data object.
\param region - Input - Region to copy.
*
*/
Data
getSlice(const DataTypes::RegionType& region) const;
/**
\brief
Copy the specified slice from the given value into this
Data object.
\param value - Input - Data to copy from.
\param region - Input - Region to copy.
*
*/
void
setSlice(const Data& value,
const DataTypes::RegionType& region);
/**
\brief
print the data values to stdout. Used for debugging
*/
void
print(void);
/**
\brief
return the MPI rank number of the local data.
MPI_COMM_WORLD is assumed and the result of MPI_Comm_rank()
is returned.
*/
int
get_MPIRank(void) const;
/**
\brief
return the number of MPI processes available to the local data.
MPI_COMM_WORLD is assumed and the result of MPI_Comm_size()
is returned.
*/
int
get_MPISize(void) const;
/**
\brief
return the MPI communicator used by the local data.
MPI_COMM_WORLD is assumed and returned.
*/
MPI_Comm
get_MPIComm(void) const;
/**
\brief
return the object produced by the factory, which is a DataConstant or DataExpanded
TODO Ownership of this object should be explained in doco.
*/
DataAbstract*
borrowData(void) const;
DataAbstract_ptr
borrowDataPtr(void) const;
DataReady_ptr
borrowReadyPtr(void) const;
/**
\brief
Return a pointer to the beginning of the datapoint at the specified offset.
TODO Eventually these should be inlined.
\param i - position(offset) in the underlying datastructure
*/
DataTypes::RealVectorType::const_reference
getDataAtOffsetRO(DataTypes::RealVectorType::size_type i, DataTypes::real_t dummy);
DataTypes::RealVectorType::reference
getDataAtOffsetRW(DataTypes::RealVectorType::size_type i, DataTypes::real_t dummy);
DataTypes::CplxVectorType::const_reference
getDataAtOffsetRO(DataTypes::CplxVectorType::size_type i, DataTypes::cplx_t dummy);
DataTypes::CplxVectorType::reference
getDataAtOffsetRW(DataTypes::CplxVectorType::size_type i, DataTypes::cplx_t dummy);
/**
\brief Ensures that the Data is expanded and returns its underlying vector
Does not check for exclusive write so do that before calling if sharing
Is a posibility.
\warning For domain implementors only. Using this function will
avoid using optimisations like lazy evaluation. It is intended
to allow quick initialisation of Data by domain; not as a bypass around
escript's other mechanisms.
*/
DataTypes::RealVectorType&
getExpandedVectorReference(DataTypes::real_t dummy=0);
DataTypes::CplxVectorType&
getExpandedVectorReference(DataTypes::cplx_t dummy);
/**
* \brief For tagged Data returns the number of tags with values.
* For non-tagged data will return 0 (even Data which has been expanded from tagged).
*/
size_t
getNumberOfTaggedValues() const;
/*
* \brief make the data complex
*/
void complicate();
protected:
private:
void init_from_data_and_fs(const Data& inData,
const FunctionSpace& functionspace);
template <typename S>
void
maskWorker(Data& other2, Data& mask2, S sentinel);
template <class BinaryOp>
DataTypes::real_t
#ifdef ESYS_MPI
lazyAlgWorker(DataTypes::real_t init, MPI_Op mpiop_type);
#else
lazyAlgWorker(DataTypes::real_t init);
#endif
DataTypes::real_t
LsupWorker() const;
DataTypes::real_t
supWorker() const;
DataTypes::real_t
infWorker() const;
template<typename Scalar>
boost::python::object
integrateWorker() const;
void
calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
void
calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
// For internal use in Data.cpp only!
// other uses should call the main entry points and allow laziness
Data
minval_nonlazy() const;
// For internal use in Data.cpp only!
Data
maxval_nonlazy() const;
/**
\brief
Check *this and the right operand are compatible. Throws
an exception if they aren't.
\param right - Input - The right hand side.
*/
inline
void
operandCheck(const Data& right) const
{
// Delegate to the underlying DataAbstract objects.
m_data->operandCheck(*right.m_data);
}
/**
\brief
Perform the specified reduction algorithm on every element of every data point in
this Data object according to the given function and return the single value result.
*/
template <class BinaryFunction>
inline
DataTypes::real_t
reduction(BinaryFunction operation,
DataTypes::real_t initial_value) const;
/**
\brief
Reduce each data-point in this Data object using the given operation. Return a Data
object with the same number of data-points, but with each data-point containing only
one value - the result of the reduction operation on the corresponding data-point in
this Data object
*/
template <class BinaryFunction>
inline
Data
dp_algorithm(BinaryFunction operation,
DataTypes::real_t initial_value) const;
/**
\brief
Convert the data type of the RHS to match this.
\param right - Input - data type to match.
*/
void
typeMatchLeft(Data& right) const;
/**
\brief
Convert the data type of this to match the RHS.
\param right - Input - data type to match.
*/
void
typeMatchRight(const Data& right);
/**
\brief
Construct a Data object of the appropriate type.
*/
void
initialise(const DataTypes::RealVectorType& value,
const DataTypes::ShapeType& shape,
const FunctionSpace& what,
bool expanded);
void
initialise(const DataTypes::CplxVectorType& value,
const DataTypes::ShapeType& shape,
const FunctionSpace& what,
bool expanded);
void
initialise(const WrappedArray& value,
const FunctionSpace& what,
bool expanded);
void
initialise(const DataTypes::real_t value,
const DataTypes::ShapeType& shape,
const FunctionSpace& what,
bool expanded);
void
initialise(const DataTypes::cplx_t value,
const DataTypes::ShapeType& shape,
const FunctionSpace& what,
bool expanded);
//
// flag to protect the data object against any update
bool m_protected;
bool m_lazy;
//
// pointer to the actual data object
// boost::shared_ptr<DataAbstract> m_data;
DataAbstract_ptr m_data;
// If possible please use getReadyPtr instead.
// But see warning below.
// Downcast m_data to DataReady (const view). ESYS_ASSERT fires if the
// object is lazy — callers must ensure the data is ready beforehand.
const DataReady*
getReady() const
{
const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
ESYS_ASSERT(dr!=0, "error casting to DataReady.");
return dr;
}
// Non-const variant of the downcast above; same precondition applies.
DataReady*
getReady()
{
DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
ESYS_ASSERT(dr!=0, "error casting to DataReady.");
return dr;
}
// Be wary of using this for local operations since it (temporarily) increases reference count.
// If you are just using this to call a method on DataReady instead of DataAbstract consider using
// getReady() instead
// Shared-pointer downcast of m_data to DataReady; asserts the object is
// not lazy, like getReady() above.
DataReady_ptr
getReadyPtr()
{
DataReady_ptr dr=REFCOUNTNS::dynamic_pointer_cast<DataReady>(m_data);
ESYS_ASSERT(dr.get()!=0, "error casting to DataReady.");
return dr;
}
// Const variant of the shared-pointer downcast above.
const_DataReady_ptr
getReadyPtr() const
{
const_DataReady_ptr dr=REFCOUNTNS::dynamic_pointer_cast<const DataReady>(m_data);
ESYS_ASSERT(dr.get()!=0, "error casting to DataReady.");
return dr;
}
// In the isShared() method below:
// A problem would occur if m_data (the address pointed to) were being modified
// while the call m_data->is_shared is being executed.
//
// Q: So why do I think this code can be thread safe/correct?
// A: We need to make some assumptions.
// 1. We assume it is acceptable to return true under some conditions when we aren't shared.
// 2. We assume that no constructions or assignments which will share previously unshared
// will occur while this call is executing. This is consistent with the way Data:: and C are written.
//
// This means that the only transition we need to consider, is when a previously shared object is
// not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
// In those cases the m_shared flag changes to false after m_data has completed changing.
// For any threads executing before the flag switches they will assume the object is still shared.
// Returns true if the underlying data may be shared with another Data
// object. May conservatively report true when unshared — see the
// discussion above for the threading assumptions.
bool isShared() const
{
#ifdef SLOWSHARECHECK
return m_data->isShared(); // single threadsafe check for this
#else
// shared_ptr::unique() is deprecated in C++17 and removed in C++20;
// use_count()!=1 is its exact equivalent (also true for a null pointer).
return m_data.use_count() != 1;
#endif
}
// Resolve this object if it is lazy; a no-op otherwise.
// Must not be invoked from inside an OpenMP parallel region.
void forceResolve()
{
if (!isLazy())
{
return;
}
#ifdef _OPENMP
if (omp_in_parallel())
{ // Yes this is throwing an exception out of an omp thread which is forbidden.
throw DataException("Please do not call forceResolve() in a parallel region.");
}
#endif
resolve();
}
/**
\brief if another object is sharing out member data make a copy to work with instead.
This code should only be called from single threaded sections of code.
*/
void exclusiveWrite()
{
#ifdef _OPENMP
// Replacing m_data is not atomic, so refuse to run in a parallel region.
if (omp_in_parallel())
{
throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
}
#endif
// Lazy data must be resolved before the share test/copy below makes sense.
forceResolve();
if (isShared())
{
// Deep-copy the shared DataAbstract so this object owns its data exclusively.
DataAbstract* t=m_data->deepCopy();
set_m_data(DataAbstract_ptr(t));
}
#ifdef EXWRITECHK
// Debug bookkeeping: record that exclusive write access was granted.
m_data->exclusivewritecalled=true;
#endif
}
/**
\brief checks if caller can have exclusive write to the object
*/
void checkExclusiveWrite()
{
if (isLazy() || isShared())
{
std::ostringstream oss;
oss << "Programming error. ExclusiveWrite required - please call requireWrite() isLazy=" << isLazy() << " isShared()=" << isShared();
throw DataException(oss.str());
}
}
/**
\brief Modify the data abstract hosted by this Data object
For internal use only.
Passing a pointer to null is permitted (do this in the destructor)
\warning Only to be called in single threaded code or inside a single/critical section. This method needs to be atomic.
*/
void set_m_data(DataAbstract_ptr p);
void TensorSelfUpdateBinaryOperation(const Data& right, escript::ES_optype operation);
friend class DataAbstract; // To allow calls to updateShareStatus
friend class TestDomain; // so its getX will work quickly
#ifdef IKNOWWHATIMDOING
friend Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
#endif
template <typename S>
friend Data condEvalWorker(escript::Data& mask, escript::Data& trueval, escript::Data& falseval, S sentinel);
friend Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed, const boost::python::tuple& filter);
};
#ifdef IKNOWWHATIMDOING
Data
applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
#endif
Data
condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
/**
\brief Create a new Expanded Data object filled with pseudo-random data.
*/
Data randomData(const boost::python::tuple& shape,
const FunctionSpace& what,
long seed, const boost::python::tuple& filter);
} // end namespace escript
// No, this is not supposed to be at the top of the file
// DataAbstact needs to be declared first, then DataReady needs to be fully declared
// so that I can dynamic cast between them below.
#include "DataReady.h"
#include "DataLazy.h"
#include "DataExpanded.h"
#include "DataConstant.h"
#include "DataTagged.h"
namespace escript
{
// Grant read-write access to one sample of real-valued data.
// Lazy data cannot be written to, so requireWrite() must be called first.
inline
DataTypes::real_t*
Data::getSampleDataRW(DataTypes::RealVectorType::size_type sampleNo, DataTypes::real_t dummy)
{
if (isLazy())
{
throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
}
DataReady* ready=getReady();
#ifdef EXWRITECHK
if (!ready->exclusivewritecalled)
{
throw DataException("Error, call to Data::getSampleDataRW without a preceeding call to requireWrite/exclusiveWrite.");
}
#endif
return ready->getSampleDataRW(sampleNo, dummy);
}
// Grant read-write access to one sample of complex-valued data.
// Lazy data cannot be written to, so requireWrite() must be called first.
inline
DataTypes::cplx_t*
Data::getSampleDataRW(DataTypes::CplxVectorType::size_type sampleNo, DataTypes::cplx_t dummy)
{
if (isLazy())
{
throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
}
DataReady* ready=getReady();
#ifdef EXWRITECHK
if (!ready->exclusivewritecalled)
{
throw DataException("Error, call to Data::getSampleDataRW without a preceeding call to requireWrite/exclusiveWrite.");
}
#endif
return ready->getSampleDataRW(sampleNo, dummy);
}
// Read-only access to one sample of real-valued data.
// Lazy data is resolved per sample on the fly; ready data is forwarded
// straight to the underlying DataReady object.
inline
const DataTypes::real_t*
Data::getSampleDataRO(DataTypes::RealVectorType::size_type sampleNo,DataTypes::real_t dummy) const
{
DataLazy* lazy=dynamic_cast<DataLazy*>(m_data.get());
if (lazy==0)
{
return getReady()->getSampleDataRO(sampleNo, dummy);
}
size_t offset=0;
const DataTypes::RealVectorType* sample=lazy->resolveSample(sampleNo,offset);
return &((*sample)[offset]);
}
// Read-only access to one sample of complex-valued data.
// Complex lazy evaluation is not implemented, so a lazy object here is a
// programming error. size_type now taken from CplxVectorType to match the
// in-class declaration (the two size_type typedefs are identical, but the
// definition previously spelled the real one).
inline
const DataTypes::cplx_t*
Data::getSampleDataRO(DataTypes::CplxVectorType::size_type sampleNo, DataTypes::cplx_t dummy) const
{
DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
if (l!=0)
{
throw DataException("Programming error: complex lazy objects are not supported.");
}
return getReady()->getSampleDataRO(sampleNo, dummy);
}
// Pointer to the start of the underlying real-valued storage, or 0 when the
// object holds no samples. Must not be called on lazy data.
inline
const DataTypes::real_t*
Data::getDataRO(DataTypes::real_t dummy) const
{
if (isLazy())
{
throw DataException("Programmer error - getDataRO must not be called on Lazy Data.");
}
// An empty object has no storage to point at.
return (getNumSamples()==0) ? 0 : &(getReady()->getTypedVectorRO(0)[0]);
}
// Pointer to the start of the underlying complex-valued storage, or 0 when
// the object holds no samples. Must not be called on lazy data.
inline
const DataTypes::cplx_t*
Data::getDataRO(DataTypes::cplx_t dummy) const
{
if (isLazy())
{
throw DataException("Programmer error - getDataRO must not be called on Lazy Data.");
}
// An empty object has no storage to point at.
return (getNumSamples()==0) ? 0 : &(getReady()->getTypedVectorRO(dummy)[0]);
}
/**
Binary Data object operators.
*/
/**
   \brief
   Reversed power: rpow(x,y) computes pow(y,x).
   NOTE(review): the argument swap appears deliberate (reversed-operand
   helper, e.g. for python-style __rpow__ dispatch) — confirm against the
   call sites before "fixing" it.
*/
inline DataTypes::real_t rpow(DataTypes::real_t x,DataTypes::real_t y)
{
return pow(y,x);
}
/**
\brief
Operator+
Takes two Data objects.
*/
Data operator+(const Data& left, const Data& right);
/**
\brief
Operator-
Takes two Data objects.
*/
Data operator-(const Data& left, const Data& right);
/**
\brief
Operator*
Takes two Data objects.
*/
Data operator*(const Data& left, const Data& right);
/**
\brief
Operator/
Takes two Data objects.
*/
Data operator/(const Data& left, const Data& right);
/**
\brief
Operator+
Takes LHS Data object and RHS python::object.
python::object must be convertible to Data type.
*/
Data operator+(const Data& left, const boost::python::object& right);
/**
\brief
Operator-
Takes LHS Data object and RHS python::object.
python::object must be convertible to Data type.
*/
Data operator-(const Data& left, const boost::python::object& right);
/**
\brief
Operator*
Takes LHS Data object and RHS python::object.
python::object must be convertible to Data type.
*/
Data operator*(const Data& left, const boost::python::object& right);
/**
\brief
Operator/
Takes LHS Data object and RHS python::object.
python::object must be convertible to Data type.
*/
Data operator/(const Data& left, const boost::python::object& right);
/**
\brief
Operator+
Takes LHS python::object and RHS Data object.
python::object must be convertible to Data type.
*/
Data operator+(const boost::python::object& left, const Data& right);
/**
\brief
Operator-
Takes LHS python::object and RHS Data object.
python::object must be convertible to Data type.
*/
Data operator-(const boost::python::object& left, const Data& right);
/**
\brief
Operator*
Takes LHS python::object and RHS Data object.
python::object must be convertible to Data type.
*/
Data operator*(const boost::python::object& left, const Data& right);
/**
\brief
Operator/
Takes LHS python::object and RHS Data object.
python::object must be convertible to Data type.
*/
Data operator/(const boost::python::object& left, const Data& right);
/**
\brief
Output operator - writes a human-readable representation of the Data object.
*/
std::ostream& operator<<(std::ostream& o, const Data& data);
/**
\brief
Compute a tensor product of two Data objects
\param arg_0 - Input - Data object
\param arg_1 - Input - Data object
\param axis_offset - Input - axis offset
\param transpose - Input - 0: transpose neither, 1: transpose arg0, 2: transpose arg1
*/
Data
C_GeneralTensorProduct(Data& arg_0,
Data& arg_1,
int axis_offset=0,
int transpose=0);
/**
\brief
Operator/
Takes RHS Data object.
*/
/**
   \brief
   True division by another Data object; delegates to the free operator/.
*/
inline
Data
Data::truedivD(const Data& right)
{
return *this / right;
}
/**
\brief
Operator/
Takes RHS python::object.
*/
/**
   \brief
   True division by a python object (used as the divisor).
   The operand is promoted to a Data object on this object's function space.
*/
inline
Data
Data::truedivO(const boost::python::object& right)
{
    // Promote the python operand, then reuse the Data/Data division path.
    return truedivD(Data(right, getFunctionSpace(), false));
}
/**
\brief
Operator/
Takes LHS python::object.
*/
/**
   \brief
   Reflected true division: the python operand becomes the numerator and
   *this is the divisor.
*/
inline
Data
Data::rtruedivO(const boost::python::object& left)
{
    // Promote the python operand on this object's function space.
    Data numerator(left, getFunctionSpace(), false);
    return numerator.truedivD(*this);
}
/**
\brief
Perform the given Data object reduction algorithm on this and return the result.
Given operation combines each element of each data point, thus argument
object (*this) is a rank n Data object, and returned object is a scalar.
Calls escript::algorithm.
*/
/**
   \brief
   Reduce every element of every data point with `operation`, starting from
   `initial_value`, and return the scalar result.
   Dispatches on the concrete representation: expanded (OpenMP-parallel),
   tagged (per-tag, default tag handled via getDefaultOffset), or constant.
   \throws DataException for empty, lazy, or unknown representations.
*/
template <class BinaryFunction>
inline
DataTypes::real_t
Data::reduction(BinaryFunction operation, DataTypes::real_t initial_value) const
{
if (isExpanded()) {
DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
ESYS_ASSERT(leftC!=0, "Programming error - casting to DataExpanded.");
DataExpanded& data=*leftC;
int i,j;
int numDPPSample=data.getNumDPPSample();
int numSamples=data.getNumSamples();
DataTypes::real_t global_current_value=initial_value;
DataTypes::real_t local_current_value;
// first_argument_type(0) is a typed dummy selecting the right vector overload
const auto& vec=data.getTypedVectorRO(typename BinaryFunction::first_argument_type(0));
const DataTypes::ShapeType& shape=data.getShape();
// calculate the reduction operation value for each data point
// reducing the result for each data-point into the current_value variables
#pragma omp parallel private(local_current_value)
{
local_current_value=initial_value;
#pragma omp for private(i,j) schedule(static)
for (i=0;i<numSamples;i++) {
for (j=0;j<numDPPSample;j++) {
local_current_value=operation(local_current_value,escript::reductionOpVector(vec,shape,data.getPointOffset(i,j),operation,initial_value));
}
}
// fold each thread's partial result into the global value, one thread at a time
#pragma omp critical
global_current_value=operation(global_current_value,local_current_value);
}
return global_current_value;
} else if (isTagged()) {
DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
ESYS_ASSERT(leftC!=0, "Programming error - casting to DataTagged.");
DataTagged& data=*leftC;
DataTypes::real_t current_value=initial_value;
const auto& vec=data.getTypedVectorRO(typename BinaryFunction::first_argument_type(0));
const DataTypes::ShapeType& shape=data.getShape();
const DataTagged::DataMapType& lookup=data.getTagLookup();
// iterate only over tags actually in use on this function space
const std::list<int> used=data.getFunctionSpace().getListOfTagsSTL();
for (std::list<int>::const_iterator i=used.begin();i!=used.end();++i)
{
int tag=*i;
DataTagged::DataMapType::const_iterator it=lookup.find(tag);
if ((tag==0) || (it==lookup.end())) // check for the default tag
{
current_value=operation(current_value,escript::reductionOpVector(vec,shape,data.getDefaultOffset(),operation,initial_value));
}
else
{
current_value=operation(current_value,escript::reductionOpVector(vec,shape,it->second,operation,initial_value));
}
}
return current_value;
} else if (isConstant()) {
// constant data: a single data point at offset 0
DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
ESYS_ASSERT(leftC!=0, "Programming error - casting to DataConstant.");
return escript::reductionOpVector(leftC->getTypedVectorRO(typename BinaryFunction::first_argument_type(0)),leftC->getShape(),0,operation,initial_value);
} else if (isEmpty()) {
throw DataException("Error - Operations (algorithm) not permitted on instances of DataEmpty.");
} else if (isLazy()) {
throw DataException("Error - Operations not permitted on instances of DataLazy.");
} else {
throw DataException("Error - Data encapsulates an unknown type.");
}
}
/**
\brief
Perform the given data point reduction algorithm on data and return the result.
Given operation combines each element within each data point into a scalar,
thus argument object is a rank n Data object, and returned object is a
rank 0 Data object.
Calls escript::dp_algorithm.
*/
/**
   \brief
   Reduce each data point to a scalar with `operation` (seeded with
   `initial_value`) and return a rank-0 Data object of matching
   representation (expanded/tagged/constant) on the same function space.
   \throws DataException for empty, lazy, or unknown representations.
*/
template <class BinaryFunction>
inline
Data
Data::dp_algorithm(BinaryFunction operation, DataTypes::real_t initial_value) const
{
if (isEmpty()) {
throw DataException("Error - Operations (dp_algorithm) not permitted on instances of DataEmpty.");
}
else if (isExpanded()) {
// scalar result object with the same expanded layout as the input
Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
ESYS_ASSERT(dataE!=0, "Programming error - casting data to DataExpanded.");
ESYS_ASSERT(resultE!=0, "Programming error - casting result to DataExpanded.");
int i,j;
int numSamples=dataE->getNumSamples();
int numDPPSample=dataE->getNumDPPSample();
// DataArrayView dataView=data.getPointDataView();
// DataArrayView resultView=result.getPointDataView();
// initial_value doubles as a typed dummy selecting the vector overload
const auto& dataVec=dataE->getTypedVectorRO(initial_value);
const DataTypes::ShapeType& shape=dataE->getShape();
auto& resultVec=resultE->getTypedVectorRW(initial_value);
// perform the operation on each data-point and assign
// this to the corresponding element in result
#pragma omp parallel for private(i,j) schedule(static)
for (i=0;i<numSamples;i++) {
for (j=0;j<numDPPSample;j++) {
resultVec[resultE->getPointOffset(i,j)] =
escript::reductionOpVector(dataVec, shape, dataE->getPointOffset(i,j),operation,initial_value);
}
}
//escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
return result;
}
else if (isTagged()) {
DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
ESYS_ASSERT(dataT!=0, "Programming error - casting data to DataTagged.");
// scalar tagged result with a zero default value; tags copied from dataT
DataTypes::RealVectorType defval(1);
defval[0]=0;
DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
const DataTypes::ShapeType& shape=dataT->getShape();
const auto& vec=dataT->getTypedVectorRO(initial_value);
const DataTagged::DataMapType& lookup=dataT->getTagLookup();
// reduce the stored value for every explicit tag
for (DataTagged::DataMapType::const_iterator i=lookup.begin(); i!=lookup.end(); i++) {
resultT->getDataByTagRW(i->first,0) =
escript::reductionOpVector(vec,shape,dataT->getOffsetForTag(i->first),operation,initial_value);
}
// then reduce the default value separately
resultT->getTypedVectorRW(initial_value)[resultT->getDefaultOffset()] = escript::reductionOpVector(dataT->getTypedVectorRO(initial_value),dataT->getShape(),dataT->getDefaultOffset(),operation,initial_value);
//escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
return Data(resultT); // note: the Data object now owns the resultT pointer
}
else if (isConstant()) {
Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
ESYS_ASSERT(dataC!=0, "Programming error - casting data to DataConstant.");
ESYS_ASSERT(resultC!=0, "Programming error - casting result to DataConstant.");
DataConstant& data=*dataC;
// single data point: reduce it straight into result element 0
resultC->getTypedVectorRW(initial_value)[0] =
escript::reductionOpVector(data.getTypedVectorRO(initial_value),data.getShape(),0,operation,initial_value);
//escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
return result;
} else if (isLazy()) {
throw DataException("Error - Operations not permitted on instances of DataLazy.");
} else {
throw DataException("Error - Data encapsulates an unknown type.");
}
}
/**
\brief
Compute a tensor operation with two Data objects
\param arg_0 - Input - Data object
\param arg_1 - Input - Data object
\param operation - Input - Binary op functor
*/
Data
C_TensorBinaryOperation(Data const &arg_0,
Data const &arg_1,
ES_optype operation);
/**
\brief
Compute a unary tensor operation on a Data object
\param arg_0 - Input - Data object
\param operation - Input - unary op code
\param tol - Input - tolerance passed to the operation (default 0)
*/
Data
C_TensorUnaryOperation(Data const &arg_0,
escript::ES_optype operation,
DataTypes::real_t tol=0);
} // namespace escript
#endif // __ESCRIPT_DATA_H__
|
GB_helper.c | //------------------------------------------------------------------------------
// GB_helper.c: helper functions for @GrB interface
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// TODO::: move these into the @GrB interface instead
// These functions are only used by the @GrB interface for
// SuiteSparse:GraphBLAS.
#include "GB_helper.h"
//------------------------------------------------------------------------------
// GB_NTHREADS: determine the number of threads to use
//------------------------------------------------------------------------------
// Declares nthreads_max, chunk, and nthreads in the enclosing scope, sized
// for the given amount of work (via the global GraphBLAS settings).
#define GB_NTHREADS(work) \
int nthreads_max = GB_Global_nthreads_max_get ( ) ; \
double chunk = GB_Global_chunk_get ( ) ; \
int nthreads = GB_nthreads (work, chunk, nthreads_max) ;
//------------------------------------------------------------------------------
// GB_ALLOCATE_WORK: allocate per-thread workspace
//------------------------------------------------------------------------------
// Declares Work (one entry per thread) and Work_size in the enclosing scope.
// NOTE: returns false from the enclosing function on allocation failure, so
// it may only be used inside functions returning bool.
#define GB_ALLOCATE_WORK(work_type) \
size_t Work_size ; \
work_type *Work = GB_MALLOC (nthreads, work_type, &Work_size) ; \
if (Work == NULL) return (false) ;
//------------------------------------------------------------------------------
// GB_FREE_WORK: free per-thread workspace
//------------------------------------------------------------------------------
// Frees the Work array allocated by GB_ALLOCATE_WORK (work_type is unused).
#define GB_FREE_WORK(work_type) \
GB_FREE (&Work, Work_size) ;
//------------------------------------------------------------------------------
// GB_helper1: convert 0-based indices to 1-based for gbextracttuples
//------------------------------------------------------------------------------
void GB_helper1              // convert zero-based indices to one-based
(
    double *restrict I_double,      // output array
    const GrB_Index *restrict I,    // input array
    int64_t nvals                   // size of input and output arrays
)
{
    GB_NTHREADS (nvals) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < nvals ; p++)
    {
        // shift each index up by one and store it as a double
        I_double [p] = (double) (I [p] + 1) ;
    }
}
//------------------------------------------------------------------------------
// GB_helper1i: convert 0-based indices to 1-based for gbextracttuples
//------------------------------------------------------------------------------
void GB_helper1i             // convert zero-based indices to one-based
(
    int64_t *restrict I,     // input/output array
    int64_t nvals            // size of input/output array
)
{
    GB_NTHREADS (nvals) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < nvals ; p++)
    {
        // in-place: shift each index up by one
        I [p] = I [p] + 1 ;
    }
}
//------------------------------------------------------------------------------
// GB_helper3: convert 1-based indices to 0-based for gb_mxarray_to_list
//------------------------------------------------------------------------------
// Converts List_double (1-based, floating point) into List (0-based integer),
// reporting the maximum (1-based) entry via List_max.  Returns false if any
// entry of List_double is not an exact integer.
bool GB_helper3 // return true if OK, false on error
(
int64_t *restrict List, // size len, output array
const double *restrict List_double, // size len, input array
int64_t len,
int64_t *List_max // also compute the max entry in the list
)
{
GB_NTHREADS (len) ;
ASSERT (List != NULL) ;
ASSERT (List_double != NULL) ;
ASSERT (List_max != NULL) ;
bool ok = true ;
int64_t listmax = -1 ;
GB_ALLOCATE_WORK (int64_t) ;
int tid ;
// each thread scans its own partition of the list
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
bool my_ok = true ;
int64_t k1, k2, my_listmax = -1 ;
GB_PARTITION (k1, k2, len, tid, nthreads) ;
for (int64_t k = k1 ; k < k2 ; k++)
{
double x = List_double [k] ;
int64_t i = (int64_t) x ;
// x must round-trip exactly through int64_t to be a valid index
my_ok = my_ok && (x == (double) i) ;
my_listmax = GB_IMAX (my_listmax, i) ;
List [k] = i - 1 ;
}
// rather than create a separate per-thread boolean workspace, just
// use a sentinal value of INT64_MIN if non-integer indices appear
// in List_double.
Work [tid] = my_ok ? my_listmax : INT64_MIN ;
}
// wrapup: reduce per-thread maxima and detect the error sentinel
for (tid = 0 ; tid < nthreads ; tid++)
{
listmax = GB_IMAX (listmax, Work [tid]) ;
ok = ok && (Work [tid] != INT64_MIN) ;
}
GB_FREE_WORK (int64_t) ;
(*List_max) = listmax ;
return (ok) ;
}
//------------------------------------------------------------------------------
// GB_helper3i: convert 1-based indices to 0-based for gb_mxarray_to_list
//------------------------------------------------------------------------------
// Converts the 1-based List_int64 into the 0-based List and reports the
// maximum (1-based) entry via List_max.  Always succeeds.
bool GB_helper3i             // return true if OK, false on error
(
    int64_t *restrict List,             // size len, output array
    const int64_t *restrict List_int64, // size len, input array
    int64_t len,
    int64_t *List_max                   // also compute the max entry in the list
)
{
    GB_NTHREADS (len) ;
    int64_t overall_max = -1 ;
    GB_ALLOCATE_WORK (int64_t) ;
    int tid ;
    // each thread converts its own partition and tracks a local max
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        int64_t kfirst, klast, thread_max = -1 ;
        GB_PARTITION (kfirst, klast, len, tid, nthreads) ;
        for (int64_t k = kfirst ; k < klast ; k++)
        {
            const int64_t i = List_int64 [k] ;
            List [k] = i - 1 ;
            thread_max = GB_IMAX (thread_max, i) ;
        }
        Work [tid] = thread_max ;
    }
    // reduce the per-thread maxima
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        overall_max = GB_IMAX (overall_max, Work [tid]) ;
    }
    GB_FREE_WORK (int64_t) ;
    (*List_max) = overall_max ;
    return (true) ;
}
//------------------------------------------------------------------------------
// GB_helper4: find the max entry in an index list for gbbuild
//------------------------------------------------------------------------------
// Computes max(I)+1 (or 0 when len == 0) and reports it via List_max.
// Always succeeds.
bool GB_helper4              // return true if OK, false on error
(
    const GrB_Index *restrict I,    // array of size len
    const int64_t len,
    GrB_Index *List_max             // find max (I) + 1
)
{
    GB_NTHREADS (len) ;
    GrB_Index overall_max = 0 ;
    GB_ALLOCATE_WORK (GrB_Index) ;
    int tid ;
    // each thread scans its own partition for a local max
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        int64_t kfirst, klast ;
        GrB_Index thread_max = 0 ;
        GB_PARTITION (kfirst, klast, len, tid, nthreads) ;
        for (int64_t k = kfirst ; k < klast ; k++)
        {
            thread_max = GB_IMAX (thread_max, I [k]) ;
        }
        Work [tid] = thread_max ;
    }
    // reduce the per-thread maxima
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        overall_max = GB_IMAX (overall_max, Work [tid]) ;
    }
    GB_FREE_WORK (GrB_Index) ;
    // report max(I)+1, or zero for an empty list
    if (len > 0) overall_max++ ;
    (*List_max) = overall_max ;
    return (true) ;
}
//------------------------------------------------------------------------------
// GB_helper5: construct pattern of S for gblogassign
//------------------------------------------------------------------------------
// Builds the pattern (Si, Sj) of the matrix S for gblogassign: for each of
// the anz entries of A, look up the corresponding row/col in M.
void GB_helper5 // construct pattern of S
(
GrB_Index *restrict Si, // array of size anz
GrB_Index *restrict Sj, // array of size anz
const GrB_Index *restrict Mi, // array of size mnz, M->i, may be NULL
const GrB_Index *restrict Mj, // array of size mnz
const int64_t mvlen, // M->vlen
GrB_Index *restrict Ai, // array of size anz, A->i, may be NULL
const int64_t avlen, // A->vlen (comment fixed: this is A's vector length, not M's)
const GrB_Index anz
)
{
GB_NTHREADS (anz) ;
ASSERT (Mj != NULL) ;
ASSERT (Si != NULL) ;
ASSERT (Sj != NULL) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// NOTE(review): GBI presumably yields k itself when the index array is
// NULL (full/bitmap case) — confirm against GBI's definition.
int64_t i = GBI (Ai, k, avlen) ;
Si [k] = GBI (Mi, i, mvlen) ;
Sj [k] = Mj [i] ;
}
}
//------------------------------------------------------------------------------
// GB_helper7: Kx = uint64 (0:mnz-1), for gblogextract
//------------------------------------------------------------------------------
// TODO: use GrB_apply with a positional operator instead
void GB_helper7              // Kx = uint64 (0:mnz-1)
(
    uint64_t *restrict Kx,   // array of size mnz
    const GrB_Index mnz
)
{
    GB_NTHREADS (mnz) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < mnz ; p++)
    {
        // each entry holds its own position
        Kx [p] = (uint64_t) p ;
    }
}
//------------------------------------------------------------------------------
// GB_helper8: expand a scalar into an array for gbbuild
//------------------------------------------------------------------------------
// TODO: use GrB_assign instead
// Broadcast a single scalar of size s into all nvals entries of C.
void GB_helper8
(
    GB_void *C,          // output array of size nvals * s
    GB_void *A,          // input scalar of size s
    GrB_Index nvals,     // size of C
    size_t s             // size of each scalar
)
{
    GB_NTHREADS (nvals) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < nvals ; p++)
    {
        // copy the scalar A into entry p of C
        memcpy (C + p * s, A, s) ;
    }
}
//------------------------------------------------------------------------------
// GB_helper10: compute norm (x-y,p) of two dense FP32 or FP64 vectors
//------------------------------------------------------------------------------
// p can be:
// 0 or 2: 2-norm, sqrt (sum ((x-y).^2))
// 1: 1-norm, sum (abs (x-y))
// INT64_MAX inf-norm, max (abs (x-y))
// INT64_MIN (-inf)-norm, min (abs (x-y))
// other: p-norm not yet computed
double GB_helper10 // norm (x-y,p), or -1 on error
(
GB_void *x_arg, // float or double, depending on type parameter
bool x_iso, // true if x is iso
GB_void *y_arg, // same type as x, treat as zero if NULL
bool y_iso, // true if y is iso (comment fixed: was "x")
GrB_Type type, // GrB_FP32 or GrB_FP64
int64_t p, // 0, 1, 2, INT64_MIN, or INT64_MAX
GrB_Index n
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
if (!(type == GrB_FP32 || type == GrB_FP64))
{
// type of x and y must be GrB_FP32 or GrB_FP64
return ((double) -1) ;
}
if (n == 0)
{
// the norm of an empty vector is zero for all supported p
return ((double) 0) ;
}
//--------------------------------------------------------------------------
// allocate workspace and determine # of threads to use
//--------------------------------------------------------------------------
GB_NTHREADS (n) ;
GB_ALLOCATE_WORK (double) ;
// iso vectors hold a single value, so index 0 is used for every k
#define X(k) x [x_iso ? 0 : k]
#define Y(k) y [y_iso ? 0 : k]
//--------------------------------------------------------------------------
// each thread computes its partial norm
//--------------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t k1, k2 ;
GB_PARTITION (k1, k2, n, tid, nthreads) ;
if (type == GrB_FP32)
{
//------------------------------------------------------------------
// FP32 case
//------------------------------------------------------------------
float my_s = 0 ;
const float *x = (float *) x_arg ;
const float *y = (float *) y_arg ;
switch (p)
{
case 0: // Frobenius norm
case 2: // 2-norm: sqrt of sum of (x-y).^2
{
if (y == NULL)
{
for (int64_t k = k1 ; k < k2 ; k++)
{
float t = X (k) ;
my_s += (t*t) ;
}
}
else
{
for (int64_t k = k1 ; k < k2 ; k++)
{
float t = (X (k) - Y (k)) ;
my_s += (t*t) ;
}
}
}
break ;
case 1: // 1-norm: sum (abs (x-y))
{
if (y == NULL)
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s += fabsf (X (k)) ;
}
}
else
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s += fabsf (X (k) - Y (k)) ;
}
}
}
break ;
case INT64_MAX: // inf-norm: max (abs (x-y))
{
if (y == NULL)
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s = fmaxf (my_s, fabsf (X (k))) ;
}
}
else
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s = fmaxf (my_s, fabsf (X (k) - Y (k))) ;
}
}
}
break ;
case INT64_MIN: // (-inf)-norm: min (abs (x-y))
{
// start the min-reduction from +infinity
my_s = INFINITY ;
if (y == NULL)
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s = fminf (my_s, fabsf (X (k))) ;
}
}
else
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s = fminf (my_s, fabsf (X (k) - Y (k))) ;
}
}
}
break ;
default: ; // p-norm not yet supported
}
// save this thread's partial result (widened to double)
Work [tid] = (double) my_s ;
}
else
{
//------------------------------------------------------------------
// FP64 case
//------------------------------------------------------------------
double my_s = 0 ;
const double *x = (double *) x_arg ;
const double *y = (double *) y_arg ;
switch (p)
{
case 0: // Frobenius norm
case 2: // 2-norm: sqrt of sum of (x-y).^2
{
if (y == NULL)
{
for (int64_t k = k1 ; k < k2 ; k++)
{
double t = X (k) ;
my_s += (t*t) ;
}
}
else
{
for (int64_t k = k1 ; k < k2 ; k++)
{
double t = (X (k) - Y (k)) ;
my_s += (t*t) ;
}
}
}
break ;
case 1: // 1-norm: sum (abs (x-y))
{
if (y == NULL)
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s += fabs (X (k)) ;
}
}
else
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s += fabs (X (k) - Y (k)) ;
}
}
}
break ;
case INT64_MAX: // inf-norm: max (abs (x-y))
{
if (y == NULL)
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s = fmax (my_s, fabs (X (k))) ;
}
}
else
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s = fmax (my_s, fabs (X (k) - Y (k))) ;
}
}
}
break ;
case INT64_MIN: // (-inf)-norm: min (abs (x-y))
{
// start the min-reduction from +infinity
my_s = INFINITY ;
if (y == NULL)
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s = fmin (my_s, fabs (X (k))) ;
}
}
else
{
for (int64_t k = k1 ; k < k2 ; k++)
{
my_s = fmin (my_s, fabs (X (k) - Y (k))) ;
}
}
}
break ;
default: ; // p-norm not yet supported
}
// save this thread's partial result
Work [tid] = my_s ;
}
}
//--------------------------------------------------------------------------
// combine results of each thread
//--------------------------------------------------------------------------
double s = 0 ;
switch (p)
{
case 0: // Frobenius norm
case 2: // 2-norm: sqrt of sum of (x-y).^2
{
for (int64_t tid = 0 ; tid < nthreads ; tid++)
{
s += Work [tid] ;
}
s = sqrt (s) ;
}
break ;
case 1: // 1-norm: sum (abs (x-y))
{
for (int64_t tid = 0 ; tid < nthreads ; tid++)
{
s += Work [tid] ;
}
}
break ;
case INT64_MAX: // inf-norm: max (abs (x-y))
{
for (int64_t tid = 0 ; tid < nthreads ; tid++)
{
s = fmax (s, Work [tid]) ;
}
}
break ;
case INT64_MIN: // (-inf)-norm: min (abs (x-y))
{
// seed with the first partial result so the min is well-defined
s = Work [0] ;
for (int64_t tid = 1 ; tid < nthreads ; tid++)
{
s = fmin (s, Work [tid]) ;
}
}
break ;
default: // p-norm not yet supported
s = -1 ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK (double) ;
return (s) ;
}
|
equation_of_state.c | /*
A simple 2D hydro code
(C) Romain Teyssier : CEA/IRFU -- original F90 code
(C) Pierre-Francois Lavallee : IDRIS -- original F90 code
(C) Guillaume Colin de Verdiere : CEA/DAM -- for the C version
*/
/*
This software is governed by the CeCILL license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL license and that you accept its terms.
*/
// #include <stdlib.h>
// #include <unistd.h>
#include <math.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef HMPP
#include "equation_of_state.h"
#include "parametres.h"
#include "perfcnt.h"
#include "utils.h"
// Ideal-gas (gamma-law) equation of state, applied to cells [imin,imax) of
// each of the `slices` rows:
//   pressure     q[IP] = max((Hgamma - 1) * rho * eint, rho * smallp)
//   sound speed  c     = sqrt(Hgamma * p / rho)
// where smallp = Hsmallc^2 / Hgamma provides a pressure floor.
// q[ID] holds density on input; q[IP] is written on output.
void
equation_of_state(int imin,
int imax,
const int Hnxyt,
const int Hnvar,
const real_t Hsmallc,
const real_t Hgamma,
const int slices, const int Hstep,
real_t eint[Hstep][Hnxyt], real_t q[Hnvar][Hstep][Hnxyt], real_t c[Hstep][Hnxyt]) {
int k, s;
int inpar = 0;
real_t smallp;
WHERE("equation_of_state");
// pressure floor derived from the minimum sound speed
smallp = Square(Hsmallc) / Hgamma;
FLOPS(1, 1, 0, 0);
// printf("EOS: %d %d %d %d %g %g %d %d\n", imin, imax, Hnxyt, Hnvar, Hsmallc, Hgamma, slices, Hstep);
#ifdef _OPENMP
// inpar records whether we are already inside a parallel region; it is only
// used by the commented-out pragma below, kept here for reference.
inpar = omp_in_parallel();
//#pragma omp parallel for if (!inpar) schedule(auto) private(s,k), shared(c,q), collapse(2)
// NOTE(review): COLLAPSE is presumably a build-time macro expanding to a
// collapse clause (or nothing) -- confirm in parametres.h.
#pragma omp parallel for private(s,k), shared(c,q) COLLAPSE
#endif
for (s = 0; s < slices; s++) {
for (k = imin; k < imax; k++) {
register real_t rhok = q[ID][s][k];
register real_t base = (Hgamma - one) * rhok * eint[s][k];
base = MAX(base, (real_t) (rhok * smallp));
q[IP][s][k] = base;
c[s][k] = sqrt(Hgamma * base / rhok);
}
}
{
// performance-counter bookkeeping for the loop above
int nops = slices * (imax - imin);
FLOPS(5 * nops, 2 * nops, 1 * nops, 0 * nops);
}
} // equation_of_state
#endif
// EOF
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.