source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
#ifndef _RAY_TRACING_H_
#define _RAY_TRACING_H_
// Standard library first, then third-party, then project headers.
// <algorithm>, <cfloat>, <cstdint>, <iostream>, <utility> and <vector> are
// used directly in this file (std::sort, DBL_MAX, uint32_t, std::cout,
// std::swap, std::vector) but were previously only reached transitively.
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <ctime>
#include <iostream>
#include <random>
#include <utility>
#include <vector>
#include <glm.hpp>
#include "omp.h"
#include "model.h"
#define NS 1
#define RAY_DEEPTH 9
// 3D model + pose and scale
struct Obj
{
Model &model;
glm::dmat4x4 pose = glm::dmat4x4(1.0);
double scale = 1.0;
Obj(Model &m_) : model(m_) {}
void set_pose(glm::dmat4x4 pose_, double s_)
{
pose = pose_;
scale = s_;
}
};
class FrameBuffer
{
public:
uint32_t *fb_; // RGBA from memory low to high , y: ^ x: ->
glm::dvec3 *fb2_; // hdr rgb vector
int w_, h_; // screen width, height
FrameBuffer(int w, int h) : w_(w), h_(h)
{
fb_ = new uint32_t[w * h];
fb2_ = new glm::dvec3[w * h];
}
~FrameBuffer()
{
if (fb_)
{
delete[] fb_;
fb_ = NULL;
}
if (fb2_)
{
delete[] fb2_;
fb2_ = NULL;
}
}
inline void clear()
{
for (int i = 0; i < w_ * h_; ++i)
fb_[i] = 0;
for (int i = 0; i < w_ * h_; ++i)
fb2_[i] = glm::dvec3(0, 0, 0);
}
inline void set_pixel(int x, int y, uint32_t color) { fb_[y * w_ + x] = color; }
// 累加光照强度..
inline void accumulate(int x, int y, glm::dvec3 color) { fb2_[y * w_ + x] += color; }
// 平均光照强度..
inline void show(double n, double adapted_lum)
{
for (int i = 0; i < w_ * h_; ++i)
fb_[i] = toRGB(ToneMapping(fb2_[i] / n, adapted_lum));
}
};
// Sampling helpers for a Phong-style material model: cosine-weighted
// diffuse sampling, Phong-lobe specular sampling, and Snell refraction
// with a reflection fallback on total internal reflection.
struct BRDF
{
    // std::_Pi is an MSVC implementation detail and M_PI is non-standard;
    // use a portable local constant instead.
    static constexpr double kPi = 3.14159265358979323846;
    std::random_device rd;
    std::mt19937 gen;
    std::uniform_real_distribution<double> dis1; // [0, 1)
    std::uniform_real_distribution<double> dis2; // [-1, 1)
    BRDF() : gen(rd()), dis1(0, 1), dis2(-1, 1) {}
    inline double randf1() { return dis1(gen); }
    inline double randf2() { return dis2(gen); }
    // Uniform direction on the unit sphere via rejection sampling.
    inline glm::dvec3 rand_sphere()
    {
        glm::dvec3 ret;
        double len2;
        do
        {
            ret.x = randf2();
            ret.y = randf2();
            ret.z = randf2();
            len2 = glm::dot(ret, ret);
            // reject points outside the sphere AND near-zero vectors,
            // which would make the normalize below numerically unstable
        } while (len2 > 1.0 || len2 < 1e-12);
        return glm::normalize(ret);
    }
    // Basis transform from the local frame (+z along n) to world space.
    // input: unit vector
    inline glm::dmat3x3 norm_to_world(glm::dvec3 n)
    {
        if (n.z > 0.999999)
            return {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
        if (n.z < -0.999999)
            return {{-1, 0, 0}, {0, 1, 0}, {0, 0, -1}};
        double cos_theta0 = n.z;
        double sin_theta0 = glm::sqrt(1.0 - cos_theta0 * cos_theta0);
        double cos_phi0 = n.x / sin_theta0;
        double sin_phi0 = n.y / sin_theta0;
        return {{cos_theta0 * cos_phi0, -sin_phi0, n.x},
                {cos_theta0 * sin_phi0, cos_phi0, n.y},
                {-sin_theta0, 0.0, n.z}};
    }
    // Cosine-weighted importance sample of the diffuse lobe around normal n.
    inline glm::dvec3 diffuse(glm::dvec3 &n)
    {
        double u1 = randf1();
        double sin_theta = glm::sqrt(1.0 - u1);
        double cos_theta = glm::sqrt(u1);
        double phi = 2.0 * kPi * randf1();
        double sin_phi = glm::sin(phi);
        double cos_phi = glm::cos(phi);
        // +1e-20 keeps the sample strictly above the tangent plane
        glm::dvec3 out(sin_theta * cos_phi, sin_theta * sin_phi, cos_theta + 1e-20);
        return glm::normalize(out * norm_to_world(n));
    }
    // Phong specular importance sample: cos^Ns lobe around reflection dir `ref`.
    inline glm::dvec3 specular(glm::dvec3 &ref, double Ns)
    {
        double cos_theta = std::pow(randf1(), 1.0 / (Ns + 1.0));
        double sin_theta = glm::sqrt(1.0 - cos_theta * cos_theta);
        double phi = 2.0 * kPi * randf1();
        double sin_phi = glm::sin(phi);
        double cos_phi = glm::cos(phi);
        glm::dvec3 out(sin_theta * cos_phi, sin_theta * sin_phi, cos_theta + 1e-20);
        return glm::normalize(out * norm_to_world(ref));
    }
    // Snell refraction of `in` through surface normal `n` with relative
    // index `eta`; falls back to mirror reflection on total internal
    // reflection (k < 0).
    inline glm::dvec3 refract(glm::dvec3 &in, glm::dvec3 &n, double eta)
    {
        double N_dot_I = glm::dot(n, in);
        double k = 1.0 - eta * eta * (1.0 - N_dot_I * N_dot_I);
        if (k < 0)
            return glm::reflect(in, n);
        else
            return glm::normalize(eta * in - (eta * N_dot_I + glm::sqrt(k)) * n);
    }
};
class BVH;
// vertex + material
// A single world-space triangle: positions, per-vertex shading normals,
// per-vertex texture coordinates, its axis-aligned bounding box
// (A = min corner, B = max corner, see Render::transform_face), and a
// non-owning reference to the material it is shaded with.
struct Face3D
{
glm::dvec3 v1, v2, v3; // 3 vertexes
glm::dvec3 n1, n2, n3; // 3 norms
glm::dvec2 uv1, uv2, uv3; // 3 uv_coordinates
glm::dvec3 A, B; // bounding box (A = per-axis min, B = per-axis max)
Material &m; // material reference; lifetime owned by the Model
// Debug helper: dump the three vertex positions to stdout.
inline void print_vertex()
{
std::cout << print_vec3(v1) << print_vec3(v2) << print_vec3(v3) << std::endl;
}
};
// One BVH leaf overlapped by a ray: the leaf node plus the ray parameters
// where the ray enters (t1) and leaves (t2) the leaf's bounding box.
struct HitInfo
{
BVH *box; // leaf node whose box the ray overlaps (not owned)
double t1, t2; // ray parameter at box entry / exit (t1 <= t2)
};
// A ray plus the mutable per-cast state used during BVH traversal and
// recursive shading (see Render::ray_tracing).
struct Ray
{
glm::dvec3 p; // start point
glm::dvec3 d; // direction (normalized by the caller)
glm::dvec3 color; // rgb radiance carried back along the ray
int deepth = 0; // number of bounces so far (capped by RAY_DEEPTH)
Face3D *hit_face = NULL; // closest face hit so far; NULL means no hit
double hit_time = DBL_MAX; // ray parameter t of the closest hit
glm::dvec3 barycentric; // barycentric coordinates of the hit point
std::vector<HitInfo> hit_results; // scratch: candidate BVH leaves for this cast
};
class BVH
{
public:
BVH *left = NULL;
BVH *right = NULL; // child node
glm::dvec3 A = glm::dvec3(DBL_MAX, DBL_MAX, DBL_MAX); // bounding box
glm::dvec3 B = glm::dvec3(-DBL_MAX, -DBL_MAX, -DBL_MAX);
std::vector<Face3D *> faces;
BVH() {}
inline void hit_test(Ray &ray)
{
glm::dvec3 temp = ray.d;
if (ray.d.x == 0)
ray.d.x = 1e-20;
if (ray.d.y == 0)
ray.d.y = 1e-20;
if (ray.d.z == 0)
ray.d.z = 1e-20;
hit_test_box_(ray);
ray.d = temp;
std::vector<HitInfo> &result = ray.hit_results;
if (result.empty())
return;
std::sort(std::begin(result), std::end(result), [](HitInfo &a, HitInfo &b) -> bool { return a.t1 < b.t1; });
for (auto i : result)
{
if (i.t1 >= ray.hit_time)
break;
hit_test_faces(ray, i.box->faces);
}
result.clear();
return;
}
inline void hit_test_box_(Ray &ray)
{
glm::dvec3 t1 = (A - ray.p) / ray.d;
glm::dvec3 t2 = (B - ray.p) / ray.d;
if (t1.x > t2.x)
std::swap(t1.x, t2.x);
if (t1.y > t2.y)
std::swap(t1.y, t2.y);
if (t1.z > t2.z)
std::swap(t1.z, t2.z);
double box_t1 = max3(t1.x, t1.y, t1.z);
double box_t2 = min3(t2.x, t2.y, t2.z);
if (box_t1 <= box_t2 && box_t2 > 0)
{
if (left)
left->hit_test_box_(ray);
if (right)
right->hit_test_box_(ray);
if (!faces.empty()) // leaf node
ray.hit_results.push_back({this, box_t1, box_t2});
}
return;
}
// 光线与一些三角形求交..
inline void hit_test_faces(Ray &ray, std::vector<Face3D *> &faces)
{
for (Face3D *face : faces)
{
glm::dvec3 d = ray.d;
glm::dvec3 p0 = face->v1;
glm::dvec3 e1 = face->v2 - p0;
glm::dvec3 e2 = face->v3 - p0;
glm::dvec3 q = glm::cross(d, e2);
double a = glm::dot(e1, q);
if (a == 0)
continue;
double f = 1 / a;
glm::dvec3 s = ray.p - p0;
double u = f * glm::dot(s, q);
if (u < 0)
continue;
glm::dvec3 r = glm::cross(s, e1);
double v = f * glm::dot(d, r);
if (v < 0 || 1 - u - v < 0)
continue;
double t = f * glm::dot(e2, r);
if (t > 1e-10 && t <= ray.hit_time)
{
ray.hit_time = t;
ray.hit_face = face; // 如果相交,指针不为 NULL
ray.barycentric = glm::dvec3(1.0 - u - v, u, v);
}
}
}
inline void insert_face(Face3D *f)
{
faces.push_back(f);
A.x = min_(f->A.x, A.x);
A.y = min_(f->A.y, A.y);
A.z = min_(f->A.z, A.z);
B.x = max_(f->B.x, B.x);
B.y = max_(f->B.y, B.y);
B.z = max_(f->B.z, B.z);
}
inline void build_subtree()
{
if (faces.size() < 9)
return;
left = new BVH();
right = new BVH();
glm::dvec3 size = abs(B - A);
if (size.x > size.y && size.x > size.z)
std::sort(std::begin(faces), std::end(faces), [](Face3D *a, Face3D *b) -> bool { return a->A.x < b->A.x; });
else if (size.y > size.z)
std::sort(std::begin(faces), std::end(faces), [](Face3D *a, Face3D *b) -> bool { return a->A.y < b->A.y; });
else
std::sort(std::begin(faces), std::end(faces), [](Face3D *a, Face3D *b) -> bool { return a->A.z < b->A.z; });
unsigned i = 0;
for (; i < faces.size() / 2; ++i)
left->insert_face(faces[i]);
for (; i < faces.size(); ++i)
right->insert_face(faces[i]);
faces.clear();
left->build_subtree();
right->build_subtree();
}
~BVH()
{
if (left)
{
delete left;
left = NULL;
}
if (right)
{
delete right;
right = NULL;
}
}
};
// Path-tracing renderer: owns the scene (objects, flattened triangles,
// BVH, skybox), the camera model and the frame buffer. Each render() call
// accumulates NS more samples per pixel and refreshes the LDR output.
class Render
{
public:
// coordinate transformation, coord_world = rotate * coord_camera + position
double camera_distance = 100.0; // image distance in pixels; larger => objects appear larger
glm::dvec3 camera_position = glm::dvec3(0, 0, 0); // camera origin in world space
glm::dmat3x3 camera_rotate = glm::dmat3x3(1.0); // camera orientation (set by set_camera)
FrameBuffer fb; // frame buffer
glm::dvec3 background = glm::dvec3(0, 0, 0); // color returned when no skybox is loaded
double adapted_lum = 1; // hdr to ldr exposure parameter
std::vector<Obj *> objs; // scene objects (not owned)
std::vector<Face3D> faces; // world-space triangles, rebuilt on each init
BVH bvh; // acceleration structure over `faces`
BRDF brdf; // importance-sampling helpers + RNG
Picture skybox; // optional environment map
glm::dmat3x3 skybox_rotate = glm::dmat3x3(1.0); // skybox orientation
std::vector<glm::ivec2> pixels; // shuffled pixel coordinates (thread load balancing)
clock_t timer; // start time of the current frame
int n_threads = 1; // OpenMP worker count
bool _init = true; // true => rebuild scene + BVH on the next render()
int n_sample = 0; // samples accumulated per pixel so far
Render(int w, int h) : fb(FrameBuffer(w, h))
{
// leave two hardware threads free for the rest of the system
n_threads = omp_get_max_threads() - 2;
omp_set_num_threads(n_threads);
}
~Render() {}
// Build the camera frame from position / look-at / up. `view` is the
// field of view in degrees; it is converted to an image distance in
// pixels so that the viewport spans that angle vertically.
inline void set_camera(glm::dvec3 position, glm::dvec3 lookat, glm::dvec3 up, double view)
{
camera_position = position;
camera_distance = fb.h_ / 2.0 / tan(view / 180.0 * std::_Pi / 2.0);
glm::dvec3 z = lookat - position;
glm::dvec3 y = up;
glm::dvec3 x = glm::cross(y, z);
z = glm::cross(x, y); // re-orthogonalize the forward axis against x and y
z = glm::normalize(z);
y = glm::normalize(y);
x = glm::normalize(x);
// store [x y z] transposed so that `direction * camera_rotate` maps
// camera-space directions into world space (row-vector convention)
camera_rotate[0][0] = x[0];
camera_rotate[1][0] = x[1];
camera_rotate[2][0] = x[2];
camera_rotate[0][1] = y[0];
camera_rotate[1][1] = y[1];
camera_rotate[2][1] = y[2];
camera_rotate[0][2] = z[0];
camera_rotate[1][2] = z[1];
camera_rotate[2][2] = z[2];
}
inline void add_obj(Obj &Model) { objs.push_back(&Model); }
void set_skybox(std::string filename) { skybox.load(filename); }
// Sample the environment map in direction d (equirectangular mapping);
// falls back to the flat background color when no skybox is loaded.
inline glm::dvec3 skybox_color(glm::dvec3 &d)
{
if (skybox.empty_)
return background;
glm::vec3 dir_ = d * skybox_rotate;
double y = 1.0 - glm::acos(dir_.y) / std::_Pi; // polar angle -> v coordinate
double x = 0;
if (dir_.z != 0)
x = dir_.z > 0 ? glm::atan(dir_.x / dir_.z) : std::_Pi + glm::atan(dir_.x / dir_.z);
x = 0.75 - x / (std::_Pi * 2); // azimuth -> u coordinate, rotated by 0.75
return skybox.Sample2D(glm::dvec2(x, y));
}
// Trace NS new samples for every pixel and refresh the LDR frame buffer.
// Returns the total number of samples accumulated per pixel so far.
int render()
{
if (_init) // each frame: rebuild triangles, BVH and the pixel work list
{
timer = clock();
n_sample = 0;
_init = false;
bvh = BVH();
fb.clear();
faces.clear();
for (Obj *i : objs)
transform_face(i);
for (Face3D &i : faces)
bvh.insert_face(&i);
bvh.build_subtree();
for (int x = 0; x < fb.w_; ++x) // current line
{
for (int y = 0; y < fb.h_; ++y) // current pixel
pixels.push_back({x, y});
}
// shuffle pixels to average cpu time of each thread
// (Fisher-Yates shuffle driven by the BRDF's RNG)
for (int i = pixels.size() - 1; i > 0; --i)
{
int shuff_idx = brdf.gen() % (i + 1);
std::swap(pixels[shuff_idx], pixels[i]);
}
std::cout << "[init] " << get_time_ms()
<< " ms\tfaces:" << faces.size() << '\n';
}
#pragma omp parallel for
for (int i = 0; i < pixels.size(); ++i)
{
int x = pixels[i].x;
int y = pixels[i].y;
for (int j = 0; j < NS; ++j)
fb.accumulate(x, y, ray_casting(x, y));
}
n_sample += NS;
fb.show(n_sample, adapted_lum);
std::cout << "[sample] " << n_sample << "\tt: " << get_time_ms() / 1000 << " s\n";
return n_sample;
}
// Build the primary camera ray through pixel (x, y) -- jittered by up to
// +-0.2 pixels for antialiasing -- and return the traced radiance.
inline glm::dvec3 ray_casting(int x, int y)
{
Ray ray;
ray.p = camera_position;
double xx = fb.w_ / 2 - x + brdf.randf2() * 0.2; // screen x is mirrored about the center
double yy = y - fb.h_ / 2 + brdf.randf2() * 0.2;
double zz = camera_distance;
glm::dvec3 direction(xx, yy, zz);
ray.d = glm::normalize(direction * camera_rotate);
ray_tracing(ray);
return ray.color;
}
// Recursive path tracer: intersect the scene, then Russian-roulette among
// diffuse / specular / refractive continuations weighted by kd/ks/kr;
// otherwise terminate with the material's emission. Missed rays sample
// the environment; the recursion is capped at RAY_DEEPTH bounces.
inline void ray_tracing(Ray &ray)
{
if (ray.deepth > RAY_DEEPTH)
{
ray.color = glm::dvec3(0, 0, 0);
return;
}
ray.hit_face = NULL;
ray.hit_time = DBL_MAX;
bvh.hit_test(ray);
if (!ray.hit_face) // escaped the scene: sample the environment
{
ray.color = skybox_color(ray.d);
return;
}
ray.deepth += 1;
Face3D *f = ray.hit_face;
Material &m = f->m;
glm::dvec3 coord = ray.barycentric;
// interpolate shading normal and uv from the barycentric coordinates
glm::dvec3 N_ = glm::normalize(coord.x * f->n1 + coord.y * f->n2 + coord.z * f->n3);
glm::dvec2 uv_ = coord.x * f->uv1 + coord.y * f->uv2 + coord.z * f->uv3;
uv_.x -= std::floor(uv_.x); // wrap uv into [0, 1)
uv_.y -= std::floor(uv_.y);
glm::dvec3 V_ = ray.d; // visual dir
glm::dvec3 R_ = glm::reflect(V_, N_); // reflect dir
// 30% time
glm::dvec3 K; // 1 - absorption (per-channel attenuation of the bounce)
glm::dvec3 L_; // sampled continuation direction
double rand_ = brdf.randf1();
double pkd = m.kd;
double pks = pkd + m.ks;
double pkr = pks + m.kr;
if (rand_ < pkd) // diffuse bounce
{
L_ = brdf.diffuse(N_);
if (m.Map_Kd.empty_)
K = m.Kd;
else
K = m.Map_Kd.Sample2D(uv_);
}
else if (rand_ < pks) // glossy (Phong) bounce
{
L_ = brdf.specular(R_, m.Ns);
K = m.Ks;
}
else if (rand_ < pkr) // refraction through the surface
{
K = m.Kr;
glm::dvec3 N__ = N_;
double Nr = 1.0 / m.Nr;
if (glm::dot(V_, N_) > 0) // exiting the medium: flip normal and index ratio
{
N__ = -N_;
Nr = 1.0 / Nr;
}
L_ = brdf.refract(V_, N__, Nr);
}
else // absorbed: terminate with the material's emission
{
ray.color = m.Le;
return;
}
ray.p = ray.p + ray.hit_time * V_; // advance the ray to the hit point
ray.d = L_;
ray_tracing(ray);
ray.color = m.Le + ray.color * K; // emission + attenuated incoming radiance
}
// Transform one object's mesh into world space and append its triangles
// (with per-face bounding boxes) to `faces`.
void transform_face(Obj *o)
{
glm::dmat3x3 rotate = o->pose * o->scale; // upper-left 3x3: rotation * uniform scale
glm::dvec3 move; // translation
// NOTE(review): glm matrices are column-major (pose[col][row]), so these
// reads take the bottom row rather than the fourth column; this assumes
// `pose` is supplied in row-vector/transposed convention -- confirm with
// the callers that fill Obj::pose.
move[0] = o->pose[0][3];
move[1] = o->pose[1][3];
move[2] = o->pose[2][3];
std::vector<glm::dvec3> vertex_new;
std::vector<glm::dvec3> norm_new;
// transform vertex positions into world space
for (int i = 0; i < o->model.vertex_.size(); ++i)
vertex_new.push_back(o->model.vertex_[i] * rotate + move);
// transform vertex normals (rotation + scale only, no translation)
for (int i = 0; i < o->model.norm_.size(); ++i)
norm_new.push_back(o->model.norm_[i] * rotate);
for (glm::imat3x4 i : o->model.face_)
{
glm::dvec3 v1 = vertex_new[i[0][0]];
glm::dvec3 v2 = vertex_new[i[1][0]];
glm::dvec3 v3 = vertex_new[i[2][0]];
glm::dvec3 n1 = norm_new[i[0][1]];
glm::dvec3 n2 = norm_new[i[1][1]];
glm::dvec3 n3 = norm_new[i[2][1]];
glm::dvec2 uv1 = o->model.uv_coord_[i[0][2]];
glm::dvec2 uv2 = o->model.uv_coord_[i[1][2]];
glm::dvec2 uv3 = o->model.uv_coord_[i[2][2]];
// bounding box
glm::dvec3 A = {min3(v1.x, v2.x, v3.x), min3(v1.y, v2.y, v3.y), min3(v1.z, v2.z, v3.z)};
glm::dvec3 B = {max3(v1.x, v2.x, v3.x), max3(v1.y, v2.y, v3.y), max3(v1.z, v2.z, v3.z)};
faces.push_back({v1, v2, v3, n1, n2, n3, uv1, uv2, uv3, A, B,
o->model.material_[i[0][3]]});
}
}
uint32_t *get_framebuffer() { return fb.fb_; }
// Milliseconds of CPU time since the current frame's init.
double get_time_ms()
{
double ret = (double)(clock() - timer) * 1000.0 / CLOCKS_PER_SEC;
return ret;
}
};
#endif
|
looptc_c2.c | #include <string.h>
/*
 * De-interleave one "page" of (tab, channel, time) samples into time-major
 * rows of reversed-frequency channels.
 *
 * page:        input, laid out as page[(tab*nchannels + channel)*padded_size + time]
 * transposed:  output, laid out as transposed[time*nchannels + reversed_channel]
 * ntabs, nchannels, ntimes: logical dimensions; padded_size (>= ntimes) is the
 *              padded length of one channel row in `page`.
 *
 * Fixes over the original: when ntimes was odd, the paired loop read
 * page[... + time + 1] and wrote one full extra row past the end of
 * `transposed`; the tail is now handled separately. The per-iteration VLA is
 * replaced by a single heap scratch buffer, and the inner `omp parallel for`
 * is dropped (a fork/join per two-byte copy costs far more than it saves).
 *
 * NOTE(review): the output index does not include `tab`, so successive tabs
 * overwrite the same region of `transposed` -- presumably the caller advances
 * `transposed` per tab (or ntabs == 1); confirm against the call sites.
 */
void deinterleave(char *page, char *transposed, const int ntabs, const int nchannels, const int ntimes, const int padded_size) {
    /* scratch for two de-interleaved channel rows */
    char *temp = (char *) malloc((size_t) (2 * nchannels));
    int tab;
    if (temp == NULL)
        return;
    for (tab = 0; tab < ntabs; tab++) {
        int time;
        /* unroll the time dimension once: build 2 complete channel rows,
           then emit them with a single memcpy */
        for (time = 0; time + 1 < ntimes; time += 2) {
            int channel;
            for (channel = 0; channel < nchannels; channel++) {
                /* reverse freq order to comply with header */
                temp[1 * nchannels - channel - 1] = page[(tab * nchannels + channel) * padded_size + (time + 0)];
                temp[2 * nchannels - channel - 1] = page[(tab * nchannels + channel) * padded_size + (time + 1)];
            }
            /* copy 2 full rows at once */
            memcpy(&transposed[time * nchannels], temp, (size_t) (2 * nchannels));
        }
        /* tail: emit the final (unpaired) time sample when ntimes is odd */
        if (time < ntimes) {
            int channel;
            for (channel = 0; channel < nchannels; channel++)
                transposed[time * nchannels + (nchannels - channel - 1)] =
                    page[(tab * nchannels + channel) * padded_size + time];
        }
    }
    free(temp);
}
|
vision.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS IIIII OOO N N %
% V V I SS I O O NN N %
% V V I SSS I O O N N N %
% V V I SS I O O N NN %
% V IIIII SSSSS IIIII OOO N N %
% %
% %
% MagickCore Computer Vision Methods %
% %
% Software Design %
% Cristy %
% September 2014 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/opencl-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/vision.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n n e c t e d C o m p o n e n t s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConnectedComponentsImage() returns the connected-components of the image
% uniquely labeled. The returned connected components image colors member
% defines the number of unique objects. Choose from 4 or 8-way connectivity.
%
% You are responsible for freeing the connected components objects resources
% with this statement;
%
% objects = (CCObjectInfo *) RelinquishMagickMemory(objects);
%
% The format of the ConnectedComponentsImage method is:
%
% Image *ConnectedComponentsImage(const Image *image,
% const size_t connectivity,CCObjectInfo **objects,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o connectivity: how many neighbors to visit, choose from 4 or 8.
%
% o objects: return the attributes of each unique object.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  qsort() comparator: orders CCObjectInfo records by area, largest first.
  The original returned (int) (q->area-(ssize_t) p->area), which truncates
  the difference to int and can overflow for large areas, yielding an
  inconsistent (undefined-behavior) ordering; compare explicitly instead.
*/
static int CCObjectInfoCompare(const void *x,const void *y)
{
  CCObjectInfo
    *p,
    *q;

  p=(CCObjectInfo *) x;
  q=(CCObjectInfo *) y;
  if (q->area > p->area)
    return(1);
  if (q->area < p->area)
    return(-1);
  return(0);
}
/*
  PerimeterThreshold() estimates the perimeter of every labeled object and
  stores it in object[i].metric[metric_index].  It slides a 2x2 window over
  each object's (1-pixel padded) bounding box and counts how many window
  pixels belong to the object; each pattern count is then weighted per
  He & Chao's boundary-length formula.
*/
static void PerimeterThreshold(const Image *component_image,
  CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    i;

  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
  for (i=0; i < (ssize_t) component_image->colors; i++)
  {
    CacheView
      *component_view;

    RectangleInfo
      bounding_box;

    size_t
      pattern[4] = { 1, 0, 0, 0 };  /* counts of 2x2 foreground patterns */

    ssize_t
      y;

    /*
      Compute perimeter of each object.
    */
    if (status == MagickFalse)
      continue;
    component_view=AcquireAuthenticCacheView(component_image,exception);
    bounding_box=object[i].bounding_box;
    for (y=(-1); y < (ssize_t) bounding_box.height+1; y++)
    {
      const Quantum
        *magick_restrict p;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      /* fetch two rows at a time so a full 2x2 window exists at each x */
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1,
        bounding_box.y+y,bounding_box.width+2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=(-1); x < (ssize_t) bounding_box.width+1; x++)
      {
        Quantum
          pixels[4];

        ssize_t
          v;

        size_t
          foreground;

        /*
          An Algorithm for Calculating Objects' Shape Features in Binary
          Images, Lifeng He, Yuyan Chao.
        */
        foreground=0;
        for (v=0; v < 2; v++)
        {
          ssize_t
            u;

          for (u=0; u < 2; u++)
          {
            ssize_t
              offset;

            offset=v*(bounding_box.width+2)*
              GetPixelChannels(component_image)+u*
              GetPixelChannels(component_image);
            pixels[2*v+u]=GetPixelIndex(component_image,p+offset);
            /* a pixel is foreground when its label matches object i */
            if ((ssize_t) pixels[2*v+u] == i)
              foreground++;
          }
        }
        if (foreground == 1)
          pattern[1]++;
        else
          if (foreground == 2)
            {
              if ((((ssize_t) pixels[0] == i) &&
                   ((ssize_t) pixels[3] == i)) ||
                  (((ssize_t) pixels[1] == i) &&
                   ((ssize_t) pixels[2] == i)))
                pattern[0]++;  /* diagonal */
              else
                pattern[2]++;
            }
          else
            if (foreground == 3)
              pattern[3]++;
        p+=GetPixelChannels(component_image);
      }
    }
    component_view=DestroyCacheView(component_view);
    /* weighted sum of the window-pattern counts approximates the
       boundary length */
    object[i].metric[metric_index]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+
      MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5);
  }
}
/*
  CircularityThreshold() computes each object's circularity,
  4*pi*area/perimeter^2 (1.0 for a perfect disk), and stores it in
  object[i].metric[metric_index].  The perimeter estimate uses the same
  2x2-window pattern count as PerimeterThreshold().

  NOTE(review): these loops stop at bounding_box.height / bounding_box.width
  whereas PerimeterThreshold() scans one extra row/column (height+1 /
  width+1); confirm whether excluding the bottom/right border here is
  intentional.
*/
static void CircularityThreshold(const Image *component_image,
  CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    i;

  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
  for (i=0; i < (ssize_t) component_image->colors; i++)
  {
    CacheView
      *component_view;

    RectangleInfo
      bounding_box;

    size_t
      pattern[4] = { 1, 0, 0, 0 };  /* counts of 2x2 foreground patterns */

    ssize_t
      y;

    /*
      Compute perimeter of each object.
    */
    if (status == MagickFalse)
      continue;
    component_view=AcquireAuthenticCacheView(component_image,exception);
    bounding_box=object[i].bounding_box;
    for (y=(-1); y < (ssize_t) bounding_box.height; y++)
    {
      const Quantum
        *magick_restrict p;

      ssize_t
        x;

      if (status == MagickFalse)
        continue;
      /* fetch two rows at a time so a full 2x2 window exists at each x */
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1,
        bounding_box.y+y,bounding_box.width+2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=(-1); x < (ssize_t) bounding_box.width; x++)
      {
        Quantum
          pixels[4];

        ssize_t
          v;

        size_t
          foreground;

        /*
          An Algorithm for Calculating Objects' Shape Features in Binary
          Images, Lifeng He, Yuyan Chao.
        */
        foreground=0;
        for (v=0; v < 2; v++)
        {
          ssize_t
            u;

          for (u=0; u < 2; u++)
          {
            ssize_t
              offset;

            offset=v*(bounding_box.width+2)*
              GetPixelChannels(component_image)+u*
              GetPixelChannels(component_image);
            pixels[2*v+u]=GetPixelIndex(component_image,p+offset);
            /* a pixel is foreground when its label matches object i */
            if ((ssize_t) pixels[2*v+u] == i)
              foreground++;
          }
        }
        if (foreground == 1)
          pattern[1]++;
        else
          if (foreground == 2)
            {
              if ((((ssize_t) pixels[0] == i) &&
                   ((ssize_t) pixels[3] == i)) ||
                  (((ssize_t) pixels[1] == i) &&
                   ((ssize_t) pixels[2] == i)))
                pattern[0]++;  /* diagonal */
              else
                pattern[2]++;
            }
          else
            if (foreground == 3)
              pattern[3]++;
        p+=GetPixelChannels(component_image);
      }
    }
    component_view=DestroyCacheView(component_view);
    /* perimeter first, then circularity = 4*pi*area / perimeter^2 */
    object[i].metric[metric_index]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+
      MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5);
    object[i].metric[metric_index]=4.0*MagickPI*object[i].area/
      (object[i].metric[metric_index]*object[i].metric[metric_index]);
  }
}
/*
  MajorAxisThreshold() computes, for every labeled object, the length of the
  major axis of the ellipse having the same second-order central moments as
  the object, and stores it in object[i].metric[metric_index].  Two passes
  over the bounding box: first the raw moments M00/M10/M01 for the centroid,
  then the central moments M11/M20/M02.
*/
static void MajorAxisThreshold(const Image *component_image,
  CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    i;

  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
  for (i=0; i < (ssize_t) component_image->colors; i++)
  {
    CacheView
      *component_view;

    double
      M00 = 0.0,
      M01 = 0.0,
      M02 = 0.0,
      M10 = 0.0,
      M11 = 0.0,
      M20 = 0.0;

    PointInfo
      centroid = { 0.0, 0.0 };

    RectangleInfo
      bounding_box;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    ssize_t
      y;

    /*
      Compute ellipse major axis of each object.
    */
    if (status == MagickFalse)
      continue;
    component_view=AcquireAuthenticCacheView(component_image,exception);
    bounding_box=object[i].bounding_box;
    /* first pass: raw moments -> centroid */
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        if ((ssize_t) GetPixelIndex(component_image,p) == i)
          {
            M00++;
            M10+=x;
            M01+=y;
          }
        p+=GetPixelChannels(component_image);
      }
    }
    centroid.x=M10*PerceptibleReciprocal(M00);
    centroid.y=M01*PerceptibleReciprocal(M00);
    /* second pass: central moments about the centroid */
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        if ((ssize_t) GetPixelIndex(component_image,p) == i)
          {
            M11+=(x-centroid.x)*(y-centroid.y);
            M20+=(x-centroid.x)*(x-centroid.x);
            M02+=(y-centroid.y)*(y-centroid.y);
          }
        p+=GetPixelChannels(component_image);
      }
    }
    component_view=DestroyCacheView(component_view);
    /* major axis from the larger eigenvalue of the covariance matrix */
    object[i].metric[metric_index]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+
      sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
  }
}
/*
  MinorAxisThreshold() computes, for every labeled object, the length of the
  minor axis of the ellipse having the same second-order central moments as
  the object, and stores it in object[i].metric[metric_index].  Identical to
  MajorAxisThreshold() except for the sign in the final eigenvalue formula.
*/
static void MinorAxisThreshold(const Image *component_image,
  CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    i;

  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
  for (i=0; i < (ssize_t) component_image->colors; i++)
  {
    CacheView
      *component_view;

    double
      M00 = 0.0,
      M01 = 0.0,
      M02 = 0.0,
      M10 = 0.0,
      M11 = 0.0,
      M20 = 0.0;

    PointInfo
      centroid = { 0.0, 0.0 };

    RectangleInfo
      bounding_box;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    ssize_t
      y;

    /*
      Compute ellipse minor axis of each object.
    */
    if (status == MagickFalse)
      continue;
    component_view=AcquireAuthenticCacheView(component_image,exception);
    bounding_box=object[i].bounding_box;
    /* first pass: raw moments -> centroid */
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        if ((ssize_t) GetPixelIndex(component_image,p) == i)
          {
            M00++;
            M10+=x;
            M01+=y;
          }
        p+=GetPixelChannels(component_image);
      }
    }
    centroid.x=M10*PerceptibleReciprocal(M00);
    centroid.y=M01*PerceptibleReciprocal(M00);
    /* second pass: central moments about the centroid */
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        if ((ssize_t) GetPixelIndex(component_image,p) == i)
          {
            M11+=(x-centroid.x)*(y-centroid.y);
            M20+=(x-centroid.x)*(x-centroid.x);
            M02+=(y-centroid.y)*(y-centroid.y);
          }
        p+=GetPixelChannels(component_image);
      }
    }
    component_view=DestroyCacheView(component_view);
    /* minor axis from the smaller eigenvalue of the covariance matrix */
    object[i].metric[metric_index]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)-
      sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
  }
}
/*
  EccentricityThreshold() computes each object's eccentricity,
  sqrt(1-(minor/major)^2) of the moment-equivalent ellipse (0.0 for a
  circle, approaching 1.0 for a line segment), and stores it in
  object[i].metric[metric_index].
*/
static void EccentricityThreshold(const Image *component_image,
  CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    i;

  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
  for (i=0; i < (ssize_t) component_image->colors; i++)
  {
    CacheView
      *component_view;

    double
      M00 = 0.0,
      M01 = 0.0,
      M02 = 0.0,
      M10 = 0.0,
      M11 = 0.0,
      M20 = 0.0;

    PointInfo
      centroid = { 0.0, 0.0 },
      ellipse_axis = { 0.0, 0.0 };

    RectangleInfo
      bounding_box;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    ssize_t
      y;

    /*
      Compute eccentricity of each object.
    */
    if (status == MagickFalse)
      continue;
    component_view=AcquireAuthenticCacheView(component_image,exception);
    bounding_box=object[i].bounding_box;
    /* first pass: raw moments -> centroid */
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        if ((ssize_t) GetPixelIndex(component_image,p) == i)
          {
            M00++;
            M10+=x;
            M01+=y;
          }
        p+=GetPixelChannels(component_image);
      }
    }
    centroid.x=M10*PerceptibleReciprocal(M00);
    centroid.y=M01*PerceptibleReciprocal(M00);
    /* second pass: central moments about the centroid */
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        if ((ssize_t) GetPixelIndex(component_image,p) == i)
          {
            M11+=(x-centroid.x)*(y-centroid.y);
            M20+=(x-centroid.x)*(x-centroid.x);
            M02+=(y-centroid.y)*(y-centroid.y);
          }
        p+=GetPixelChannels(component_image);
      }
    }
    component_view=DestroyCacheView(component_view);
    /* x = major axis, y = minor axis of the moment-equivalent ellipse */
    ellipse_axis.x=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+
      sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
    ellipse_axis.y=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)-
      sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
    object[i].metric[metric_index]=sqrt(1.0-(ellipse_axis.y*ellipse_axis.y*
      PerceptibleReciprocal(ellipse_axis.x*ellipse_axis.x)));
  }
}
/*
  AngleThreshold() computes the orientation (in degrees) of each object's
  moment-equivalent ellipse from its second-order central moments and stores
  it in object[i].metric[metric_index], with quadrant corrections applied
  from the signs of M11 and M20-M02.
*/
static void AngleThreshold(const Image *component_image,
  CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    i;

  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(status) \
    magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
  for (i=0; i < (ssize_t) component_image->colors; i++)
  {
    CacheView
      *component_view;

    double
      M00 = 0.0,
      M01 = 0.0,
      M02 = 0.0,
      M10 = 0.0,
      M11 = 0.0,
      M20 = 0.0;

    PointInfo
      centroid = { 0.0, 0.0 };

    RectangleInfo
      bounding_box;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    ssize_t
      y;

    /*
      Compute ellipse angle of each object.
    */
    if (status == MagickFalse)
      continue;
    component_view=AcquireAuthenticCacheView(component_image,exception);
    bounding_box=object[i].bounding_box;
    /* first pass: raw moments -> centroid */
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        if ((ssize_t) GetPixelIndex(component_image,p) == i)
          {
            M00++;
            M10+=x;
            M01+=y;
          }
        p+=GetPixelChannels(component_image);
      }
    }
    centroid.x=M10*PerceptibleReciprocal(M00);
    centroid.y=M01*PerceptibleReciprocal(M00);
    /* second pass: central moments about the centroid */
    for (y=0; y < (ssize_t) bounding_box.height; y++)
    {
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
        bounding_box.y+y,bounding_box.width,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) bounding_box.width; x++)
      {
        if ((ssize_t) GetPixelIndex(component_image,p) == i)
          {
            M11+=(x-centroid.x)*(y-centroid.y);
            M20+=(x-centroid.x)*(x-centroid.x);
            M02+=(y-centroid.y)*(y-centroid.y);
          }
        p+=GetPixelChannels(component_image);
      }
    }
    component_view=DestroyCacheView(component_view);
    object[i].metric[metric_index]=RadiansToDegrees(1.0/2.0*atan(2.0*M11*
      PerceptibleReciprocal(M20-M02)));
    /*
      NOTE(review): fabs(M11) < 0.0 is always false and fabs(M20-M02) >= 0.0
      is always true -- these look like they were meant to compare against
      MagickEpsilon (or == 0.0).  As written the first branch is dead code,
      though the final else happens to apply the same +90 correction for the
      M11 == 0, M20 < M02 case, so observable behavior matches the intent.
    */
    if (fabs(M11) < 0.0)
      {
        if ((fabs(M20-M02) >= 0.0) && ((M20-M02) < 0.0))
          object[i].metric[metric_index]+=90.0;
      }
    else
      if (M11 < 0.0)
        {
          if (fabs(M20-M02) >= 0.0)
            {
              if ((M20-M02) < 0.0)
                object[i].metric[metric_index]+=90.0;
              else
                object[i].metric[metric_index]+=180.0;
            }
        }
      else
        if ((fabs(M20-M02) >= 0.0) && ((M20-M02) < 0.0))
          object[i].metric[metric_index]+=90.0;
  }
}
MagickExport Image *ConnectedComponentsImage(const Image *image,
const size_t connectivity,CCObjectInfo **objects,ExceptionInfo *exception)
{
#define ConnectedComponentsImageTag "ConnectedComponents/Image"
CacheView
*component_view,
*image_view,
*object_view;
CCObjectInfo
*object;
char
*c;
const char
*artifact,
*metrics[CCMaxMetrics];
double
max_threshold,
min_threshold;
Image
*component_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MatrixInfo
*equivalences;
ssize_t
i;
size_t
size;
ssize_t
background_id,
connect4[2][2] = { { -1, 0 }, { 0, -1 } },
connect8[4][2] = { { -1, -1 }, { -1, 0 }, { -1, 1 }, { 0, -1 } },
dx,
dy,
first,
last,
n,
step,
y;
/*
Initialize connected components image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (objects != (CCObjectInfo **) NULL)
*objects=(CCObjectInfo *) NULL;
component_image=CloneImage(image,0,0,MagickTrue,exception);
if (component_image == (Image *) NULL)
return((Image *) NULL);
component_image->depth=MAGICKCORE_QUANTUM_DEPTH;
if (AcquireImageColormap(component_image,MaxColormapSize,exception) == MagickFalse)
{
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Initialize connected components equivalences.
*/
size=image->columns*image->rows;
if (image->columns != (size/image->rows))
{
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
equivalences=AcquireMatrixInfo(size,1,sizeof(ssize_t),exception);
if (equivalences == (MatrixInfo *) NULL)
{
component_image=DestroyImage(component_image);
return((Image *) NULL);
}
for (n=0; n < (ssize_t) (image->columns*image->rows); n++)
(void) SetMatrixElement(equivalences,n,0,&n);
object=(CCObjectInfo *) AcquireQuantumMemory(MaxColormapSize,sizeof(*object));
if (object == (CCObjectInfo *) NULL)
{
equivalences=DestroyMatrixInfo(equivalences);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(object,0,MaxColormapSize*sizeof(*object));
for (i=0; i < (ssize_t) MaxColormapSize; i++)
{
object[i].id=i;
object[i].bounding_box.x=(ssize_t) image->columns;
object[i].bounding_box.y=(ssize_t) image->rows;
GetPixelInfo(image,&object[i].color);
}
/*
Find connected components.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++)
{
if (status == MagickFalse)
continue;
dx=connectivity > 4 ? connect8[n][1] : connect4[n][1];
dy=connectivity > 4 ? connect8[n][0] : connect4[n][0];
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-1,image->columns,3,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=GetPixelChannels(image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel,
target;
ssize_t
neighbor_offset,
obj,
offset,
ox,
oy,
root;
/*
Is neighbor an authentic pixel and a different color than the pixel?
*/
GetPixelInfoPixel(image,p,&pixel);
if (((x+dx) < 0) || ((x+dx) >= (ssize_t) image->columns) ||
((y+dy) < 0) || ((y+dy) >= (ssize_t) image->rows))
{
p+=GetPixelChannels(image);
continue;
}
neighbor_offset=dy*(GetPixelChannels(image)*image->columns)+dx*
GetPixelChannels(image);
GetPixelInfoPixel(image,p+neighbor_offset,&target);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
p+=GetPixelChannels(image);
continue;
}
/*
Resolve this equivalence.
*/
offset=y*image->columns+x;
neighbor_offset=dy*image->columns+dx;
ox=offset;
status=GetMatrixElement(equivalences,ox,0,&obj);
while (obj != ox)
{
ox=obj;
status=GetMatrixElement(equivalences,ox,0,&obj);
}
oy=offset+neighbor_offset;
status=GetMatrixElement(equivalences,oy,0,&obj);
while (obj != oy)
{
oy=obj;
status=GetMatrixElement(equivalences,oy,0,&obj);
}
if (ox < oy)
{
status=SetMatrixElement(equivalences,oy,0,&ox);
root=ox;
}
else
{
status=SetMatrixElement(equivalences,ox,0,&oy);
root=oy;
}
ox=offset;
status=GetMatrixElement(equivalences,ox,0,&obj);
while (obj != root)
{
status=GetMatrixElement(equivalences,ox,0,&obj);
status=SetMatrixElement(equivalences,ox,0,&root);
}
oy=offset+neighbor_offset;
status=GetMatrixElement(equivalences,oy,0,&obj);
while (obj != root)
{
status=GetMatrixElement(equivalences,oy,0,&obj);
status=SetMatrixElement(equivalences,oy,0,&root);
}
status=SetMatrixElement(equivalences,y*image->columns+x,0,&root);
p+=GetPixelChannels(image);
}
}
}
/*
Label connected components.
*/
n=0;
component_view=AcquireAuthenticCacheView(component_image,exception);
for (y=0; y < (ssize_t) component_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(component_view,0,y,component_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) component_image->columns; x++)
{
ssize_t
id,
offset;
offset=y*image->columns+x;
status=GetMatrixElement(equivalences,offset,0,&id);
if (id != offset)
status=GetMatrixElement(equivalences,id,0,&id);
else
{
id=n++;
if (id >= (ssize_t) MaxColormapSize)
break;
}
status=SetMatrixElement(equivalences,offset,0,&id);
if (x < object[id].bounding_box.x)
object[id].bounding_box.x=x;
if (x >= (ssize_t) object[id].bounding_box.width)
object[id].bounding_box.width=(size_t) x;
if (y < object[id].bounding_box.y)
object[id].bounding_box.y=y;
if (y >= (ssize_t) object[id].bounding_box.height)
object[id].bounding_box.height=(size_t) y;
object[id].color.red+=QuantumScale*GetPixelRed(image,p);
object[id].color.green+=QuantumScale*GetPixelGreen(image,p);
object[id].color.blue+=QuantumScale*GetPixelBlue(image,p);
if (image->alpha_trait != UndefinedPixelTrait)
object[id].color.alpha+=QuantumScale*GetPixelAlpha(image,p);
if (image->colorspace == CMYKColorspace)
object[id].color.black+=QuantumScale*GetPixelBlack(image,p);
object[id].centroid.x+=x;
object[id].centroid.y+=y;
object[id].area++;
SetPixelIndex(component_image,(Quantum) id,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(component_image);
}
if (n > (ssize_t) MaxColormapSize)
break;
if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
progress++;
proceed=SetImageProgress(image,ConnectedComponentsImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
component_view=DestroyCacheView(component_view);
image_view=DestroyCacheView(image_view);
equivalences=DestroyMatrixInfo(equivalences);
if (n > (ssize_t) MaxColormapSize)
{
object=(CCObjectInfo *) RelinquishMagickMemory(object);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"TooManyObjects");
}
background_id=0;
min_threshold=0.0;
max_threshold=0.0;
component_image->colors=(size_t) n;
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width-=(object[i].bounding_box.x-1);
object[i].bounding_box.height-=(object[i].bounding_box.y-1);
object[i].color.red/=(QuantumScale*object[i].area);
object[i].color.green/=(QuantumScale*object[i].area);
object[i].color.blue/=(QuantumScale*object[i].area);
if (image->alpha_trait != UndefinedPixelTrait)
object[i].color.alpha/=(QuantumScale*object[i].area);
if (image->colorspace == CMYKColorspace)
object[i].color.black/=(QuantumScale*object[i].area);
object[i].centroid.x/=object[i].area;
object[i].centroid.y/=object[i].area;
max_threshold+=object[i].area;
if (object[i].area > object[background_id].area)
background_id=i;
}
max_threshold+=MagickEpsilon;
n=(-1);
artifact=GetImageArtifact(image,"connected-components:background-id");
if (artifact != (const char *) NULL)
background_id=(ssize_t) StringToLong(artifact);
artifact=GetImageArtifact(image,"connected-components:area-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max area threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].area < min_threshold) ||
(object[i].area >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:keep-colors");
if (artifact != (const char *) NULL)
{
const char
*p;
/*
Keep selected objects based on color, merge others.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
object[i].merge=MagickTrue;
for (p=artifact; ; )
{
char
color[MagickPathExtent];
PixelInfo
pixel;
const char
*q;
for (q=p; *q != '\0'; q++)
if (*q == ';')
break;
(void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
MagickPathExtent));
(void) QueryColorCompliance(color,AllCompliance,&pixel,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse)
object[i].merge=MagickFalse;
if (*q == '\0')
break;
p=q+1;
}
}
artifact=GetImageArtifact(image,"connected-components:keep-ids");
if (artifact == (const char *) NULL)
artifact=GetImageArtifact(image,"connected-components:keep");
if (artifact != (const char *) NULL)
{
/*
Keep selected objects based on id, merge others.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
object[i].merge=MagickTrue;
for (c=(char *) artifact; *c != '\0'; )
{
while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
c++;
first=(ssize_t) strtol(c,&c,10);
if (first < 0)
first+=(ssize_t) component_image->colors;
last=first;
while (isspace((int) ((unsigned char) *c)) != 0)
c++;
if (*c == '-')
{
last=(ssize_t) strtol(c+1,&c,10);
if (last < 0)
last+=(ssize_t) component_image->colors;
}
step=(ssize_t) (first > last ? -1 : 1);
for ( ; first != (last+step); first+=step)
object[first].merge=MagickFalse;
}
}
artifact=GetImageArtifact(image,"connected-components:keep-top");
if (artifact != (const char *) NULL)
{
CCObjectInfo
*top_objects;
ssize_t
top_ids;
/*
Keep top objects.
*/
top_ids=(ssize_t) StringToLong(artifact);
top_objects=(CCObjectInfo *) AcquireQuantumMemory(component_image->colors,
sizeof(*top_objects));
if (top_objects == (CCObjectInfo *) NULL)
{
object=(CCObjectInfo *) RelinquishMagickMemory(object);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memcpy(top_objects,object,component_image->colors*sizeof(*object));
qsort((void *) top_objects,component_image->colors,sizeof(*top_objects),
CCObjectInfoCompare);
for (i=top_ids+1; i < (ssize_t) component_image->colors; i++)
object[top_objects[i].id].merge=MagickTrue;
top_objects=(CCObjectInfo *) RelinquishMagickMemory(top_objects);
}
artifact=GetImageArtifact(image,"connected-components:remove-colors");
if (artifact != (const char *) NULL)
{
const char
*p;
/*
Remove selected objects based on color, keep others.
*/
for (p=artifact; ; )
{
char
color[MagickPathExtent];
PixelInfo
pixel;
const char
*q;
for (q=p; *q != '\0'; q++)
if (*q == ';')
break;
(void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
MagickPathExtent));
(void) QueryColorCompliance(color,AllCompliance,&pixel,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse)
object[i].merge=MagickTrue;
if (*q == '\0')
break;
p=q+1;
}
}
artifact=GetImageArtifact(image,"connected-components:remove-ids");
if (artifact == (const char *) NULL)
artifact=GetImageArtifact(image,"connected-components:remove");
if (artifact != (const char *) NULL)
for (c=(char *) artifact; *c != '\0'; )
{
/*
Remove selected objects based on id, keep others.
*/
while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
c++;
first=(ssize_t) strtol(c,&c,10);
if (first < 0)
first+=(ssize_t) component_image->colors;
last=first;
while (isspace((int) ((unsigned char) *c)) != 0)
c++;
if (*c == '-')
{
last=(ssize_t) strtol(c+1,&c,10);
if (last < 0)
last+=(ssize_t) component_image->colors;
}
step=(ssize_t) (first > last ? -1 : 1);
for ( ; first != (last+step); first+=step)
object[first].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:perimeter-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max perimeter threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="perimeter";
PerimeterThreshold(image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:circularity-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max circularity threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="circularity";
CircularityThreshold(image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:diameter-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max diameter threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="diameter";
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].metric[n]=ceil(sqrt(4.0*object[i].area/MagickPI)-0.5);
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
}
artifact=GetImageArtifact(image,"connected-components:major-axis-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max ellipse major threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="major-axis";
MajorAxisThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:minor-axis-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max ellipse minor threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="minor-axis";
MinorAxisThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:eccentricity-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max eccentricity threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="eccentricy";
EccentricityThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:angle-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max ellipse angle threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="angle";
AngleThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
/*
Merge any object not within the min and max area threshold.
*/
component_view=AcquireAuthenticCacheView(component_image,exception);
object_view=AcquireVirtualCacheView(component_image,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
{
RectangleInfo
bounding_box;
size_t
id;
ssize_t
j;
if (status == MagickFalse)
continue;
if ((object[i].merge == MagickFalse) || (i == background_id))
continue; /* keep object */
/*
Merge this object.
*/
for (j=0; j < (ssize_t) component_image->colors; j++)
object[j].census=0;
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
size_t
k;
if (status == MagickFalse)
continue;
j=(ssize_t) GetPixelIndex(component_image,p);
if (j == i)
for (k=0; k < (ssize_t) (connectivity > 4 ? 4 : 2); k++)
{
const Quantum
*q;
/*
Compute area of adjacent objects.
*/
if (status == MagickFalse)
continue;
dx=connectivity > 4 ? connect8[k][1] : connect4[k][1];
dy=connectivity > 4 ? connect8[k][0] : connect4[k][0];
q=GetCacheViewVirtualPixels(object_view,bounding_box.x+x+dx,
bounding_box.y+y+dy,1,1,exception);
if (q == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
j=(ssize_t) GetPixelIndex(component_image,q);
if (j != i)
object[j].census++;
}
p+=GetPixelChannels(component_image);
}
}
/*
Merge with object of greatest adjacent area.
*/
id=0;
for (j=1; j < (ssize_t) component_image->colors; j++)
if (object[j].census > object[id].census)
id=(size_t) j;
object[i].area=0.0;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,q) == i)
SetPixelIndex(component_image,(Quantum) id,q);
q+=GetPixelChannels(component_image);
}
if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
status=MagickFalse;
}
}
object_view=DestroyCacheView(object_view);
component_view=DestroyCacheView(component_view);
artifact=GetImageArtifact(image,"connected-components:mean-color");
if (IsStringTrue(artifact) != MagickFalse)
{
/*
Replace object with mean color.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
component_image->colormap[i]=object[i].color;
}
(void) SyncImage(component_image,exception);
artifact=GetImageArtifact(image,"connected-components:verbose");
if ((IsStringTrue(artifact) != MagickFalse) ||
(objects != (CCObjectInfo **) NULL))
{
/*
Report statistics on each unique object.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width=0;
object[i].bounding_box.height=0;
object[i].bounding_box.x=(ssize_t) component_image->columns;
object[i].bounding_box.y=(ssize_t) component_image->rows;
object[i].centroid.x=0;
object[i].centroid.y=0;
object[i].census=object[i].area == 0.0 ? 0.0 : 1.0;
object[i].area=0;
}
component_view=AcquireVirtualCacheView(component_image,exception);
for (y=0; y < (ssize_t) component_image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,0,y,component_image->columns,
1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) component_image->columns; x++)
{
size_t
id;
id=(size_t) GetPixelIndex(component_image,p);
if (x < object[id].bounding_box.x)
object[id].bounding_box.x=x;
if (x > (ssize_t) object[id].bounding_box.width)
object[id].bounding_box.width=(size_t) x;
if (y < object[id].bounding_box.y)
object[id].bounding_box.y=y;
if (y > (ssize_t) object[id].bounding_box.height)
object[id].bounding_box.height=(size_t) y;
object[id].centroid.x+=x;
object[id].centroid.y+=y;
object[id].area++;
p+=GetPixelChannels(component_image);
}
}
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width-=(object[i].bounding_box.x-1);
object[i].bounding_box.height-=(object[i].bounding_box.y-1);
object[i].centroid.x=object[i].centroid.x/object[i].area;
object[i].centroid.y=object[i].centroid.y/object[i].area;
}
component_view=DestroyCacheView(component_view);
qsort((void *) object,component_image->colors,sizeof(*object),
CCObjectInfoCompare);
if (objects == (CCObjectInfo **) NULL)
{
ssize_t
j;
artifact=GetImageArtifact(image,
"connected-components:exclude-header");
if (IsStringTrue(artifact) == MagickFalse)
{
(void) fprintf(stdout,"Objects (");
artifact=GetImageArtifact(image,
"connected-components:exclude-ids");
if (IsStringTrue(artifact) == MagickFalse)
(void) fprintf(stdout,"id: ");
(void) fprintf(stdout,"bounding-box centroid area mean-color");
for (j=0; j <= n; j++)
(void) fprintf(stdout," %s",metrics[j]);
(void) fprintf(stdout,"):\n");
}
for (i=0; i < (ssize_t) component_image->colors; i++)
if (object[i].census > 0.0)
{
char
mean_color[MagickPathExtent];
GetColorTuple(&object[i].color,MagickFalse,mean_color);
(void) fprintf(stdout," ");
artifact=GetImageArtifact(image,
"connected-components:exclude-ids");
if (IsStringTrue(artifact) == MagickFalse)
(void) fprintf(stdout,"%.20g: ",(double) object[i].id);
(void) fprintf(stdout,
"%.20gx%.20g%+.20g%+.20g %.1f,%.1f %.*g %s",(double)
object[i].bounding_box.width,(double)
object[i].bounding_box.height,(double)
object[i].bounding_box.x,(double) object[i].bounding_box.y,
object[i].centroid.x,object[i].centroid.y,
GetMagickPrecision(),(double) object[i].area,mean_color);
for (j=0; j <= n; j++)
(void) fprintf(stdout," %.*g",GetMagickPrecision(),
object[i].metric[j]);
(void) fprintf(stdout,"\n");
}
}
}
if (objects == (CCObjectInfo **) NULL)
object=(CCObjectInfo *) RelinquishMagickMemory(object);
else
*objects=object;
return(component_image);
}
|
pysnobal.h | /*
** NAME
**	pysnobal.h
**
** DESCRIPTION
** The include file for 'isnobal'.
*/
#ifndef _ISNOBAL_H_
#define _ISNOBAL_H_
#define DEFAULT_Z_U 5.0 /* default wind speed measurement height */
#define DEFAULT_Z_T 5.0 /* default air temp and vapor press hght */
#define IBANDS 6 /* # bands in input image */
#define EMBANDS 10 /* # bands in energy/mass output image */
#define SBANDS 9 /* # bands in snow output image */
#define PBANDS 4 /* # bands in precip image */
#define ICBANDS 7 /* # bands in initial conditions image */
#define ICBANDS_RESTART 8 /* # bands in init cond image (restart) */
#define TBANDS 17 /* # bands in temporary results file */
#define NO_DATA -999999 /* output value for masked pnt (no data) */
/*
 * Per-grid-point model state and accumulated energy/mass output for the
 * isnobal snow model.  One record holds everything tracked for a single
 * point; OUTPUT_REC_ARR (below) stores the same quantities as parallel
 * arrays over all points.
 *
 * NOTE(review): individual field meanings are inferred from standard
 * snobal variable naming (_0 = surface layer, _l = lower layer, no
 * suffix = whole snowcover; *_bar = averaged over the output interval;
 * *_sum = accumulated totals) -- confirm against the snobal docs.
 */
typedef struct {
    int masked;             /* nonzero: point is masked out (output as NO_DATA) */
    double current_time;    /* current model time for this point */
    double time_since_out;  /* time elapsed since output was last written */
    double elevation;       /* point elevation */
    double z_0;             /* roughness length (presumed) */
    double rho;             /* snow density (presumed) */
    /* temperatures: surface layer, lower layer, whole snowcover (presumed) */
    double T_s_0;
    double T_s_l;
    double T_s;
    /* liquid-water state of the snowcover (presumed meanings) */
    double h2o_sat;         /* degree of liquid-water saturation */
    double h2o_max;         /* maximum liquid water the snow can hold */
    double h2o;             /* liquid-water content */
    double h2o_vol;         /* liquid water by volume */
    double h2o_total;       /* total liquid water */
    int layer_count;        /* number of snow layers currently present */
    /* cold content (cc), mass (m) and depth (z): surface layer, lower
       layer, and whole snowcover (presumed) */
    double cc_s_0;
    double cc_s_l;
    double cc_s;
    double m_s_0;
    double m_s_l;
    double m_s;
    double z_s_0;
    double z_s_l;
    double z_s;
    /* energy-balance terms averaged over the output interval (presumed):
       net radiation, sensible heat, latent heat, ground heat flux, flux
       at the bottom of the surface layer, advected precip heat, and net
       energy change of the snowcover / surface layer */
    double R_n_bar;
    double H_bar;
    double L_v_E_bar;
    double G_bar;
    double G_0_bar;
    double M_bar;
    double delta_Q_bar;
    double delta_Q_0_bar;
    /* accumulated mass-balance outputs (presumed): evaporation and
       sublimation, melt, and predicted runoff */
    double E_s_sum;
    double melt_sum;
    double ro_pred_sum;
} OUTPUT_REC;
//typedef OUTPUT_REC *out_p;
//extern OUTPUT_REC output_rec[100]; /* output data structure */
/*
 * Structure-of-arrays counterpart of OUTPUT_REC: each member points to an
 * array holding one element per grid point.  See OUTPUT_REC for the
 * meaning of the individual quantities.
 *
 * NOTE(review): the h2o_vol/h2o member order here differs from
 * OUTPUT_REC; harmless for independent pointers, but worth confirming
 * it is intentional wherever records are copied field-by-field.
 */
typedef struct {
    int* masked;
    double* current_time;
    double* time_since_out;
    double* elevation;
    double* z_0;
    double* rho;
    double* T_s_0;
    double* T_s_l;
    double* T_s;
    double* h2o_sat;
    double* h2o_max;
    double* h2o_vol;
    double* h2o;
    double* h2o_total;
    int* layer_count;
    double* cc_s_0;
    double* cc_s_l;
    double* cc_s;
    double* m_s_0;
    double* m_s_l;
    double* m_s;
    double* z_s_0;
    double* z_s_l;
    double* z_s;
    double* R_n_bar;
    double* H_bar;
    double* L_v_E_bar;
    double* G_bar;
    double* G_0_bar;
    double* M_bar;
    double* delta_Q_bar;
    double* delta_Q_0_bar;
    double* E_s_sum;
    double* melt_sum;
    double* ro_pred_sum;
} OUTPUT_REC_ARR;
/*
 * Per-timestep forcing inputs, one array element per grid point.
 * NOTE(review): meanings inferred from snobal input naming -- confirm.
 */
typedef struct {
    double* S_n;           /* net solar radiation (presumed) */
    double* I_lw;          /* incoming thermal (longwave) radiation (presumed) */
    double* T_a;           /* air temperature (presumed) */
    double* e_a;           /* vapor pressure (presumed) */
    double* u;             /* wind speed (presumed) */
    double* T_g;           /* soil temperature (presumed) */
    double* m_pp;          /* precipitation mass (presumed) */
    double* percent_snow;  /* fraction of precip falling as snow (presumed) */
    double* rho_snow;      /* density of snowfall (presumed) */
    double* T_pp;          /* precipitation temperature (presumed) */
} INPUT_REC_ARR;
/*
 * Scalar model parameters shared by all grid points.
 */
typedef struct {
    double z_u;            /* wind speed measurement height (cf. DEFAULT_Z_U) */
    double z_T;            /* air temp / vapor pressure measurement height (cf. DEFAULT_Z_T) */
    double z_g;            /* soil temperature measurement depth (presumed) */
    int relative_heights;  /* nonzero: heights are relative to the snow surface (presumed) */
    double max_h2o_vol;    /* max liquid water as volume fraction (presumed) */
    double max_z_s_0;      /* maximum surface (active) layer depth (presumed) */
} PARAMS;
/* ------------------------------------------------------------------------- */
/*
* Routines that are part of isnobal program.
*/
//extern int call_snobal(int N, int nthreads, int first_step, TSTEP_REC tstep_info[4], OUTPUT_REC** output_rec, INPUT_REC_ARR* input1, INPUT_REC_ARR* input2, PARAMS params, OUTPUT_REC_ARR* output1);
extern int call_snobal(int N, int nthreads, int first_step, TSTEP_REC tstep_info[4], INPUT_REC_ARR* input1, INPUT_REC_ARR* input2, PARAMS params, OUTPUT_REC_ARR* output1);
//extern void assign_buffers (int masked, int n, int output, OUTPUT_REC **output_rec);
//extern void buffers (void);
//extern void check_range (int index, double value, double min, double max,
// char * descrip, bool_t print_line_samp);
//extern void check_units (LQH_T **lq_headers, UNITS_T *units, int nbands,
// int fd);
//extern void copy_image (char *tempfile, int nbands, fpixel_t * buf,
// int fdo);
//extern void e_m_image (int step, OUTPUT_REC **output_rec, int nbits);
//extern bool_t extract_data (bool_t first_step, int n, bool_t sun_up[], OUTPUT_REC **output_rec);
//extern void headers (void);
//extern void isnobal (int out_step, int nthreads, int dynamic_teams, int got_opt_F, int verbose, int nbits);
///*extern void isnobal (int out_step);*/
//extern void newlqh (int fdo, int nbands, fpixel_t *mins,
// fpixel_t *maxs, char **units);
//extern int open_input (char *prefix, int index, bool_t *sun_up);
//extern int output_image (char * filename, int nbands, char ** units,
// char ** annots, fpixel_t * mins,
// fpixel_t * maxs, int nbits);
//extern bool_t precip_event (float curr_time, char *pre_img);
//extern void precip_hdrs (char *filename);
//extern void read_data (int first_step);
//extern void snow_image (int step, OUTPUT_REC **output_rec, int nbits);
//extern void temp_filename (char *prefix, char *filename);
//extern void warn_range (int index, double value, double min, double max,
// char * descrip, bool_t print_line_samp);
//extern void write_data (int output, int last_step);
/* ------------------------------------------------------------------------- */
/*
* Global variables internal to isnobal program.
*/
extern int units_warn; /* check units in input images? */
extern char *compress_cmd; /* shell command to compress images */
/* timesteps and indices */
extern int start_step; /* index of first timestep */
extern int nstep; /* # of data timesteps */
extern int nDigits; /* # of digits in suffixes of images*/
extern bool_t restart; /* restart flag */
/* model variables */
extern double elevation;
//#pragma omp threadprivate(elevation)
#endif /* _ISNOBAL_H_ */
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/*
 * Compute *RESULT = *X - *Y for `struct timeval` values.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 *
 * NOTE: *Y is normalized in place (seconds are borrowed/carried into it)
 * as a side effect, so the caller's Y operand may be modified.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y until x->tv_usec >= y->tv_usec, so the
       microsecond subtraction below cannot go negative. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += borrow;
        y->tv_usec -= 1000000 * borrow;
    }
    /* Carry any surplus full seconds of microseconds back into y so the
       resulting tv_usec stays below one second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* Both components can now be subtracted directly; tv_usec is
       certainly non-negative. */
    result->tv_usec = x->tv_usec - y->tv_usec;
    result->tv_sec = x->tv_sec - y->tv_sec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver: order-1, 3D 7-point stencil with variable coefficients.
 *
 * Usage: prog Nx Ny Nz [Nt]
 *   Nx/Ny/Nz are interior sizes (one halo plane is added on each side);
 *   Nt is the number of timesteps.  Runs the stencil TESTS times and
 *   reports each run's time plus the minimum via PRINT_RESULTS.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;

    /* Defaults so the sizes are never read uninitialized when too few
       command-line arguments are supplied (originally they were read
       blindly -- undefined behavior for argc <= 3 or argc <= 4). */
    Nx = Ny = Nz = 32 + 2;
    Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1]) + 2;
        Ny = atoi(argv[2]) + 2;
        Nz = atoi(argv[3]) + 2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Allocate the two time buffers A[0..1][Nz][Ny][Nx]. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for (m = 0; m < 2; m++) {
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    /* Allocate the 7 per-point coefficient arrays. */
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for (m = 0; m < 7; m++) {
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* Tile size information, including extra element to decide the list
       length; the list is modified here before source-to-source
       transformations. */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 4;
    tile_size[3] = 128;
    tile_size[4] = -1;

    /* For timekeeping. */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;

    /* Initialize EVERY cell of both time buffers and the coefficients.
       The stencil reads the i-1/j-1/k-1 halo planes (index 0) and, from
       the second timestep on, the halos of A[1]; the original loops
       started at 1 (and never touched A[1]), leaving those cells
       uninitialized. */
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m = 0; m < 7; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;           /* consumed by PRINT_RESULTS */
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);  /* was lowercase `min`, which is not defined here */
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    (void) ts_return;  /* negative-elapsed flag; not used beyond the call */

    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free all allocated memory (the original leaked the spine pointers
       A and coef, and the tile_size list). */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for (m = 0; m < 7; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);

    return 0;
}
|
gemm.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "common/log.h"
#include "memory/t_malloc.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// Matrix element access macros; assumes row-major storage.
#define A(i, j) A[(i)*lda + (j)]
#define B(i, j) B[(i)*ldb + (j)]
#define C(i, j) C[(i)*ldc + (j)]
#if __aarch64__
#define MR_INT8 4
#define NR_INT8 2
#define MR 6
#define NR 16
#else
#define MR_INT8 4
#define NR_INT8 2
#define MR 6
#define NR 8
#endif
#define s_min(i, j) ((i) < (j) ? (i) : (j))
namespace paddle_mobile {
namespace operators {
namespace math {
class Gemm {
public:
/*
// 将 A 矩阵分块复制到连续内存(ColMajor)
void PackMatrixA(int m, int k, int m_tail, const float *A, int lda,
float *buffer);
// 将 B 矩阵分块复制到连续内存(ColMajor)
void PackMatrixB(int k, int n, int n_tail, const float *B, int ldb,
float *buffer);
*/
// Member-function-pointer types used to dispatch to the pack / micro-kernel
// implementation selected at runtime (by CPU capability and matrix shape).
typedef void (Gemm::*FnPack)(int, int, int, const float *, int, float *);
typedef void (Gemm::*FnAddDot)(int, const float *, const float *, float *,
                               int);
FnPack procPackA;
FnPack procPackB;
FnAddDot procAddDot;
// Pack blocks of matrix A into contiguous memory (RowMajor). The "_<n>r"
// suffix is the number of rows handled per micro-kernel step; "_omp_"
// variants are the OpenMP multithreaded versions.
void PackMatrixA_4r(int m, int k, int m_tail, const float *A, int lda,
                    float *buffer);
void PackMatrixA_6r(int m, int k, int m_tail, const float *A, int lda,
                    float *buffer);
void PackMatrixA_8r(int m, int k, int m_tail, const float *A, int lda,
                    float *buffer);
void PackMatrixA_omp_6r(int m, int k, int m_tail, const float *A, int lda,
                        float *buffer);
void PackMatrixA_omp_8r(int m, int k, int m_tail, const float *A, int lda,
                        float *buffer);
// Pack blocks of matrix B into contiguous memory (RowMajor). The "_<n>c"
// suffix is the number of columns handled per micro-kernel step; "_omp_"
// variants are the OpenMP multithreaded versions.
void PackMatrixB_8c(int k, int n, int n_tail, const float *B, int ldb,
                    float *buffer);
void PackMatrixB_12c(int k, int n, int n_tail, const float *B, int ldb,
                     float *buffer);
void PackMatrixB_16c(int k, int n, int n_tail, const float *B, int ldb,
                     float *buffer);
void PackMatrixB_omp_8c(int k, int n, int n_tail, const float *B, int ldb,
                        float *buffer);
void PackMatrixB_omp_12c(int k, int n, int n_tail, const float *B, int ldb,
                         float *buffer);
void PackMatrixB_omp_16c(int k, int n, int n_tail, const float *B, int ldb,
                         float *buffer);
// Blocked matrix multiplication on one packed (mc x kc) * (kc x nc) tile,
// accumulating into scratch buffer c and writing results back to C.
void InnerKernel(int mc, int nc, float alpha, const float *a, const float *b,
                 float beta, float *c, float *C, int ldc, bool relu);
void InnerKernelWithBias(int mc, int nc, float alpha, const float *a,
                         const float *b, float beta, float *c, float *C,
                         int ldc, bool relu, float *bias);
void InnerKernelWithBn(int mc, int nc, float alpha, const float *a,
                       const float *b, float beta, float *c, float *C,
                       int ldc, bool relu, float *new_scale, float *new_bias);
void InnerKernelWithBnAdd(int mc, int nc, float alpha, const float *a,
                          const float *b, float beta, float *c, float *C,
                          int ldc, bool relu, float *new_scale,
                          float *new_bias, float *bias);
void InnerKernelWithPRelu(int mc, int nc, const float *a, const float *b,
                          float *c, float *C, int ldc, float *p,
                          std::string mode, float *bias, float *bias1);
// Vector-matrix multiplication (M = 1).
void VectorKernel(int m, int n, int k, float alpha, const float *A, int lda,
                  const float *B, int ldb, float beta, float *C, int ldc,
                  bool relu);
/*
void VectorKernelWithBn(int m, int n, int k, float alpha, const float *A,
                        int lda, const float *B, int ldb, float beta, float
*C, int ldc, bool relu, float *new_scale, float *new_bias);
*/
// Micro-kernels: compute one small (rows x cols) block of C, as named.
void AddDot4x4(int k, const float *a, const float *b, float *c, int ldc);
void AddDot4x8(int k, const float *a, const float *b, float *c, int ldc);
void AddDot6x8(int k, const float *a, const float *b, float *c, int ldc);
void AddDot8x12(int k, const float *a, const float *b, float *c, int ldc);
void AddDot6x16(int k, const float *a, const float *b, float *c, int ldc);
// Write-back of a blocked-multiplication result tile (scratch c -> C).
// C = A * B
void WriteBasic(int mc, int nc, float *c, float *C, int ldc);
// C = alpha * A * B + beta * C
void WriteWithAlphaBeta(int mc, int nc, float *c, float *C, int ldc);
// C = A * B + C
void WriteWithAdd(int mc, int nc, float *c, float *C, int ldc);
// C = A * B + bias
void WriteWithAddV1(int mc, int nc, float *c, float *C, int ldc, float *bias);
// C = A * B + C, relu(C)
void WriteWithAddRelu(int mc, int nc, float *c, float *C, int ldc);
// C = A * B + C, prelu(C)
void WriteWithAddPRelu(int mc, int nc, float *c, float *C, int ldc, float *p,
                       std::string mode, float *bias, float *bias1);
// C = A * B + bias, relu(C)
void WriteWithAddReluV1(int mc, int nc, float *c, float *C, int ldc,
                        float *bias);
// C = A * B, batchnorm(C)
void WriteWithBn(int mc, int nc, float *c, float *C, int ldc,
                 float *new_scale, float *new_bias);
// C = A * B, batchnorm(C), relu(C)
void WriteWithBnRelu(int mc, int nc, float *c, float *C, int ldc,
                     float *new_scale, float *new_bias);
void WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc,
                        float *new_scale, float *new_bias, float *bias1);
// Write-back of a vector-matrix multiplication result (M = 1).
// C = A * B
void VecWriteBasic(int n, float *c, float *C, int ldc);
// C = alpha * A * B + beta * C
void VecWriteWithAlphaBeta(int n, float *c, float *C, int ldc);
// C = A * B + C
void VecWriteWithAdd(int n, float *c, float *C, int ldc);
// C = A * B + C, relu(C)
void VecWriteWithAddRelu(int n, float *c, float *C, int ldc);
/*
// C = A * B, batchnorm(C)
void VecWriteWithBn(int n, float *c, float *C, int ldc, float *new_scale,
                    float *new_bias);
// C = A * B, batchnorm(C), relu(C)
void VecWriteWithBnRelu(int n, float *c, float *C, int ldc, float
*new_scale, float *new_bias);
*/
// 32-bit float matrix multiplication.
void Sgemm(int m, int n, int k, float alpha, const float *A, int lda,
           const float *B, int ldb, float beta, float *C, int ldc, bool relu,
           float *bias);
// 32-bit float matrix multiplication, applying batchnorm to the result.
void SgemmWithBn(int m, int n, int k, float alpha, const float *A, int lda,
                 const float *B, int ldb, float beta, float *C, int ldc,
                 bool relu, float *new_scale, float *new_bias, float *bias);
void SgemmWithPRelu(int m, int n, int k, const float *A, int lda,
                    const float *B, int ldb, float *C, int ldc, float *p,
                    std::string mode, float *bias, float *bias1);
// 32-bit float matrix multiplication (OpenMP multithreaded version).
void Sgemm_omp(int m, int n, int k, float alpha, const float *A, int lda,
               const float *B, int ldb, float beta, float *C, int ldc,
               bool relu, float *bias);
// 32-bit float matrix multiplication with batchnorm on the result
// (OpenMP multithreaded version).
void SgemmWithBn_omp(int m, int n, int k, float alpha, const float *A,
                     int lda, const float *B, int ldb, float beta, float *C,
                     int ldc, bool relu, float *new_scale, float *new_bias,
                     float *bias);
void SgemmWithPRelu_omp(int m, int n, int k, const float *A, int lda,
                        const float *B, int ldb, float *C, int ldc, float *p,
                        std::string mode, float *bias, float *bias1);
// 8 bits function cluster begins
// 8 bits int small block inner product
void AddDot4x8(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
               int32_t ldc);
void AddDot4x2(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
               int32_t ldc);
void AddDot6x8(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
               int32_t ldc);
// 8 bits int inner product
template <typename Otype>
void InnerKernel(int32_t mc, int32_t nc, float alpha, const int8_t *a,
                 const int8_t *b, float beta, int32_t *c, Otype *C,
                 int32_t ldc, bool relu);
template <typename Otype>
void InnerKernelWithBias(int32_t mc, int32_t nc, float alpha, const int8_t *a,
                         const int8_t *b, float beta, int32_t *c, Otype *C,
                         int32_t ldc, bool relu, int32_t *bias,
                         bool addOnRow = false);
// 8 bits int pack function
void PackMatrixA_4r(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
                    int32_t lda, int8_t *buffer);
void PackMatrixA_4r_16(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
                       int32_t lda, int8_t *buffer);
void PackMatrixA_6r(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
                    int32_t lda, int8_t *buffer);
void PackMatrixB_2c_16(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
                       int32_t ldb, int8_t *buffer);
void PackMatrixB_8c(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
                    int32_t ldb, int8_t *buffer);
void PackMatrixA_omp_4r(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
                        int32_t lda, int8_t *buffer);
void PackMatrixB_omp_8c(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
                        int32_t ldb, int8_t *buffer);
void PackMatrixA_omp_4r_16(int32_t m, int32_t k, int32_t m_tail,
                           const int8_t *A, int32_t lda, int8_t *buffer);
void PackMatrixB_omp_2c_16(int32_t k, int32_t n, int32_t n_tail,
                           const int8_t *B, int32_t ldb, int8_t *buffer);
// 8 bits int matrix product
template <typename Itype, typename Btype, typename Otype>
void Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, const Itype *A,
               int32_t lda, const Itype *B, int32_t ldb, float beta, Otype *C,
               int32_t ldc, bool relu, Btype *bias, bool addOnRow = false);
template <typename Otype>
void Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, const int8_t *A,
               int32_t lda, const int8_t *B, int32_t ldb, float beta,
               Otype *C, int32_t ldc, bool relu, int32_t *bias,
               bool addOnRow = false);
template <typename Itype, typename Btype, typename Otype>
void Sgemm(int32_t m, int32_t n, int32_t k, float alpha, const Itype *A,
           int32_t lda, const Itype *B, int32_t ldb, float beta, Otype *C,
           int32_t ldc, bool relu, Btype *bias, bool addOnRow = false);
template <typename Otype>
void Sgemm(int32_t m, int32_t n, int32_t k, float alpha, const int8_t *A,
           int32_t lda, const int8_t *B, int32_t ldb, float beta, Otype *C,
           int32_t ldc, bool relu, int32_t *bias, bool addOnRow = false);
// 8 bits int write back
// C = A * B
void WriteBasic(int32_t mc, int32_t nc, int32_t *c, int32_t *C, int32_t ldc);
// C = A * B + bias, scale * relu(C)
void WriteWithAddReluScale(int32_t mc, int32_t nc, int32_t *c, int8_t *C,
                           int32_t ldc, int32_t *bias, float scale);
// C = A * B + bias, scale * C, bias is added on column
void WriteWithAddScale(int32_t mc, int32_t nc, int32_t *c, int8_t *C,
                       int32_t ldc, int32_t *bias, float scale);
// C = A * B + bias, scale * C, bias is added on row
void WriteWithAddScaleT(int32_t mc, int32_t nc, int32_t *c, int8_t *C,
                        int32_t ldc, int32_t *bias, float scale);

private:
// Cache-blocking sizes chosen per call: MC rows of A, KC inner dimension,
// NC columns of B.
int MC = 0;
int KC = 0;
int NC = 0;
// Scratch buffers for packed operands, 32-bit float path.
float *packedA;
float *packedB;
float *packedC;
float *zero;
// Scratch buffers for packed operands, 8-bit int path.
int8_t *packedA_int8;
int8_t *packedB_int8;
int32_t *packedC_int32;
int8_t *zero_int8;
};
// 8 bits int matrix product (m*k x k*n)
template <typename Otype>
void Gemm::Sgemm(int32_t m, int32_t n, int32_t k, float alpha, const int8_t *A,
int32_t lda, const int8_t *B, int32_t ldb, float beta,
Otype *C, int32_t ldc, bool relu, int32_t *bias,
bool addOnRow) {
// L1 data cache is 32 kib (Per Contex-A57, Contex-A72, Contex-A73)
// L2 cache is 0.5~4 Mib (Contex-A72 cluster)
int32_t L1 = 32 * 1024;
int32_t L2 = 512 * 1024;
const int32_t k_complete = (k + 15) - ((k + 15) & 15);
KC = k_complete;
MC = L1 / (KC * sizeof(int8_t));
NC = L2 / (KC * sizeof(int8_t));
// make sure MC is multiple of MR_INT8, and NC is multiple of NR_INT8
if (MC == 0) {
MC = MR_INT8;
} else {
int32_t mblock_num = (m + MC - 1) / MC;
MC = (m + mblock_num - 1) / mblock_num;
MC = (MC + MR_INT8 - 1) / MR_INT8 * MR_INT8;
}
// DLOG << "mblock_num = " << mblock_num << ", MC = " << MC << "\n";
if (NC == 0) {
NC = NR_INT8;
} else {
int32_t nblock_num = (n + NC - 1) / NC;
NC = (n + nblock_num - 1) / nblock_num;
NC = (NC + NR_INT8 - 1) / NR_INT8 * NR_INT8;
}
// DLOG << "nblock_num = " << nblock_num << ", NC = " << NC << "\n";
packedA_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC));
packedB_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC));
packedC_int32 = static_cast<int32_t *>(
paddle_mobile::memory::Alloc(sizeof(int32_t) * MC * NC));
zero_int8 =
static_cast<int8_t *>(paddle_mobile::memory::Alloc(sizeof(int8_t) * k));
memset(static_cast<void *>(zero_int8), 0, sizeof(int8_t) * k);
int32_t mc, nc;
for (int32_t j = 0; j < n; j += NC) {
nc = s_min(n - j, NC);
PackMatrixB_2c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, packedB_int8);
for (int32_t i = 0; i < m; i += MC) {
mc = s_min(m - i, MC);
PackMatrixA_4r_16(mc, k, mc % MR_INT8, &A(i, 0), lda, packedA_int8);
if (bias == nullptr) {
InnerKernel(mc, nc, alpha, packedA_int8, packedB_int8, beta,
packedC_int32, &C(i, j), ldc, relu);
} else {
if (addOnRow) {
InnerKernelWithBias(mc, nc, alpha, packedA_int8, packedB_int8, beta,
packedC_int32, &C(i, j), ldc, relu, bias + j,
addOnRow);
} else {
InnerKernelWithBias(mc, nc, alpha, packedA_int8, packedB_int8, beta,
packedC_int32, &C(i, j), ldc, relu, bias + i,
addOnRow);
}
}
}
}
paddle_mobile::memory::Free(packedA_int8);
paddle_mobile::memory::Free(packedB_int8);
paddle_mobile::memory::Free(packedC_int32);
paddle_mobile::memory::Free(zero_int8);
}
// 8 bits int matrix product (m*k x k*n), omp version
template <typename Otype>
void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha,
const int8_t *A, int32_t lda, const int8_t *B, int32_t ldb,
float beta, Otype *C, int32_t ldc, bool relu,
int32_t *bias, bool addOnRow) {
#ifdef _OPENMP
int32_t max_threads = omp_get_max_threads();
#else
int32_t max_threads = 1;
#endif
int32_t L1 = 64 / max_threads * 1024;
const int32_t k_complete = (k + 15) - ((k + 15) & 15);
KC = k_complete;
zero_int8 =
static_cast<int8_t *>(paddle_mobile::memory::Alloc(sizeof(int8_t) * k));
memset(static_cast<void *>(zero_int8), 0, sizeof(int8_t) * k);
if (m > n) {
// 对 A 分块
MC = L1 / (KC * sizeof(int8_t));
if (MC == 0) {
MC = MR_INT8;
} else {
int32_t mblock_num = (m + MC - 1) / MC;
MC = (m + mblock_num - 1) / mblock_num;
MC = (MC + MR_INT8 - 1) / MR_INT8 * MR_INT8;
}
// 补齐 B
NC = (n + NR_INT8 - 1) / NR_INT8 * NR_INT8;
packedB_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC));
#if __aarch64__
// TODO(paddle mobile)
#else
PackMatrixB_omp_2c_16(k, n, n % NR_INT8, B, ldb, packedB_int8);
#endif
packedA_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC * max_threads));
} else {
// 对 B 分块
NC = L1 / (KC * sizeof(int8_t));
if (NC == 0) {
NC = NR_INT8;
} else {
int32_t nblock_num = (n + NC - 1) / NC;
NC = (n + nblock_num - 1) / nblock_num;
NC = (NC + NR_INT8 - 1) / NR_INT8 * NR_INT8;
}
// 补齐 A
MC = (m + MR_INT8 - 1) / MR_INT8 * MR_INT8;
packedA_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC));
#if __aarch64__
// TODO(paddle mobile)
#else
PackMatrixA_omp_4r_16(m, k, m % MR_INT8, A, lda, packedA_int8);
#endif
packedB_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC * max_threads));
}
packedC_int32 = static_cast<int32_t *>(
paddle_mobile::memory::Alloc(sizeof(int32_t) * MC * NC * max_threads));
if (m > n) {
#pragma omp parallel for
for (int32_t i = 0; i < m; i += MC) {
#ifdef _OPENMP
int32_t local_threads = omp_get_thread_num();
#else
int32_t local_threads = 0;
#endif
int32_t mc;
mc = s_min(m - i, MC);
int8_t *local_A = packedA_int8 + MC * KC * local_threads;
int32_t *local_C = packedC_int32 + MC * NC * local_threads;
#if __aarch64__
// TODO(paddle mobile)
#else
PackMatrixA_4r_16(mc, k, mc % MR_INT8, &A(i, 0), lda, local_A);
#endif
if (bias == nullptr) {
InnerKernel(mc, n, alpha, local_A, packedB_int8, beta, local_C,
&C(i, 0), ldc, relu);
} else {
if (addOnRow) {
InnerKernelWithBias(mc, n, alpha, local_A, packedB_int8, beta,
local_C, &C(i, 0), ldc, relu, bias, addOnRow);
} else {
InnerKernelWithBias(mc, n, alpha, local_A, packedB_int8, beta,
local_C, &C(i, 0), ldc, relu, bias + i, addOnRow);
}
}
}
} else {
#pragma omp parallel for
for (int32_t j = 0; j < n; j += NC) {
#ifdef _OPENMP
int32_t local_threads = omp_get_thread_num();
#else
int32_t local_threads = 0;
#endif
int32_t nc;
nc = s_min(n - j, NC);
int8_t *local_B = packedB_int8 + KC * NC * local_threads;
int32_t *local_C = packedC_int32 + MC * NC * local_threads;
#if __aarch64__
// TODO(paddle mobile)
#else
PackMatrixB_2c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, local_B);
#endif
if (bias == nullptr) {
InnerKernel(m, nc, alpha, packedA_int8, local_B, beta, local_C,
&C(0, j), ldc, relu);
} else {
if (addOnRow) {
InnerKernelWithBias(m, nc, alpha, packedA_int8, local_B, beta,
local_C, &C(0, j), ldc, relu, bias + j, addOnRow);
} else {
InnerKernelWithBias(m, nc, alpha, packedA_int8, local_B, beta,
local_C, &C(0, j), ldc, relu, bias, addOnRow);
}
}
}
}
paddle_mobile::memory::Free(packedA_int8);
paddle_mobile::memory::Free(packedB_int8);
paddle_mobile::memory::Free(packedC_int32);
paddle_mobile::memory::Free(zero_int8);
}
} // namespace math
} // namespace operators
} // namespace paddle_mobile
|
citrix_ns_fmt_plug.c | /*
* Description from Nicolas Ruff:
* - Salt value is hashed as an hexadecimal string, not bytes.
* - The trailing NULL byte of password string is taken into account during
* hashing.
* - The leading '1' is actually the string length
* '1' = 49 = len('1') + len(hex_salt) + len(hex_sha1)
*
* ---------------------------------------
* import hashlib
*
* def netscaler_hash( rand_bytes, pwd ):
* s = hashlib.sha1()
* s.update( rand_bytes )
* s.update( pwd )
* return "1" + rand_bytes + s.hexdigest()
*
* # TEST VECTOR
* # 14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f
*
* rand_bytes = "4dfca1e6"
* pwd = "nsroot\x00"
* print netscaler_hash( rand_bytes, pwd )
* ---------------------------------------
*
* This software is Copyright (c) 2013 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* This version is hard coded for salt length 8 (for speed).
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_ctrxns;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ctrxns);
#else
#include <string.h>
#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "johnswap.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif
#include "simd-intrinsics.h"
#include "common.h"
#include "sha.h"
#include "memdbg.h" // Must be last included header
#define FORMAT_LABEL "Citrix_NS10"
#define FORMAT_NAME "Netscaler 10"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH (55 - SALT_SIZE - 1)
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ((index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (((i)&3)^3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4) //for endianity conversion
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Known-good vectors: "1" (length byte, 49) + 8 hex salt chars + 40 hex
   SHA-1 chars, paired with the plaintext password. */
static struct fmt_tests tests[] = {
	{"100000000f1dc96f425971ba590a076fd0f8bccbf25c1ba0c", ""},
	{"14623718525fe334bbd9c0704e06ce134ef17b51f6b33548c", " "},
	{"15c5c5c5c6ccd884f6383f55a6aeba5f847775e57ab012675", "Tw"},
	{"13333333319143136ba9ff9e18d1cb022b63df0926de9509e", "333"},
	{"144434241d7ce89a7484cd202400639692258dde37efc29c5", "four"},
	{"100010203e09cefed1847b7a2a5e7a5d2cdc67e8a56ed0bdd", "fiver"},
	{"14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f", "nsroot"},
	{"1deadcafe7587ea23b25a6ccf3fd53192e36ad3e9a2553b20", "magnum!"},
	{NULL}
};
#ifdef SIMD_COEF_32
/* SIMD path: keys and digests live in interleaved per-lane buffers, one
   SHA-1 input block / digest group per NBKEYS candidates; kpc caches the
   max keys per crypt chosen in init(). */
static unsigned char (*saved_key)[SHA_BUF_SIZ * 4 * NBKEYS];
static unsigned char (*crypt_key)[BINARY_SIZE * NBKEYS];
static unsigned int kpc;
#else
/* Scalar path: one plaintext and one 20-byte digest per candidate, plus
   the current salt. */
static char saved_salt[SALT_SIZE];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[BINARY_SIZE / 4];
#endif
/* Format init: scale keys-per-crypt by thread count (OpenMP) and allocate
   the key/digest buffers; SIMD buffers are allocated vector-aligned. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	/* max gets an extra OMP_SCALE factor to amortize fork/join cost. */
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt / NBKEYS,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt / NBKEYS,
	                             sizeof(*crypt_key), MEM_ALIGN_SIMD);
	kpc = self->params.max_keys_per_crypt;
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
#endif
}
/* Tear-down: release the buffers allocated by init(). The two frees are
   independent, so order is irrelevant. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
}
/* Decode the trailing 40 hex chars of the ciphertext into the raw 20-byte
   SHA-1 digest. Returns a pointer to a static buffer (JtR convention). */
static void *get_binary(char *ciphertext)
{
	static unsigned char *realcipher;
	const char *hex;
	int i;

	if (!realcipher)
		realcipher = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	/* First char is the total length ('1' == 49); the digest is the
	   last 2*BINARY_SIZE hex characters. */
	hex = ciphertext + *ciphertext - 2 * BINARY_SIZE;
	for (i = 0; i < BINARY_SIZE; i++)
		realcipher[i] = (atoi16[ARCH_INDEX(hex[2 * i])] << 4) |
			atoi16[ARCH_INDEX(hex[2 * i + 1])];
#ifdef SIMD_COEF_32
	/* SIMD kernels expect the digest words byte-swapped. */
	alter_endianity(realcipher, BINARY_SIZE);
#endif
	return (void*)realcipher;
}
/* Accept only strings of exactly 49 lowercase-hex characters whose first
   character is '1' (the encoded total length). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	const int len = *ciphertext;

	if (len != '1')
		return 0;
	return strlen(ciphertext) == len &&
		strspn(ciphertext, HEXCHARS_lc) == len;
}
/* Store one candidate password. SIMD path: copy the key word-by-word into
   the interleaved SHA-1 input buffer right after the 8 salt bytes, append
   the 0x80 padding bit, clear stale tail words, and record the bit length
   (which counts the password's trailing NUL, per the Netscaler scheme).
   Scalar path: just keep a NUL-terminated copy. */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
#if ARCH_ALLOWS_UNALIGNED
	const uint32_t *wkey = (uint32_t*)key;
#else
	/* Word-wise reads require alignment on this arch; bounce through an
	   aligned copy when the input isn't aligned. */
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
	const uint32_t *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ?
	                                   key : strcpy(buf_aligned, key));
#endif
	/* Start writing just past the salt (^3 compensates for GETPOS's
	   per-byte endian flip when addressing a whole word). */
	uint32_t *keybuf_word = (uint32_t*)&saved_key[0][GETPOS(SALT_SIZE ^ 3, index)];
	unsigned int len;
	uint32_t temp;

	len = SALT_SIZE;
	/* Copy 4 bytes at a time until a NUL appears in the word; place the
	   0x80 pad bit immediately after the NUL byte. */
	while((temp = *wkey++) & 0xff) {
		if (!(temp & 0xff00))
		{
			*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 16));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000))
		{
			*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80U << 24));
			len+=2;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP(temp);
		keybuf_word += SIMD_COEF_32;
		if (!(temp & 0xff000000))
		{
			*keybuf_word = 0x80000000;
			len+=3;
			goto key_cleaning;
		}
		len += 4;
	}
	/* Key length was a multiple of 4: NUL then pad bit in a fresh word. */
	*keybuf_word = 0x00800000;

key_cleaning:
	/* Zero any leftover words from a previous, longer key. */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	len += 1; /* Trailing null is included */
	/* Store the message bit length in the final word of the block. */
	((unsigned int*)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3;
#else
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
#endif
}
/* Return the plaintext for one candidate. SIMD path reconstructs it from
   the interleaved buffer: recover the byte length from the stored bit
   length (minus salt and trailing NUL), then copy byte-by-byte. */
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	unsigned int i, s;
	static char out[PLAINTEXT_LENGTH + 1];

	s = (((unsigned int*)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] >> 3) - SALT_SIZE - 1;
	for (i = 0; i < s; i++)
		out[i] = ((char*)saved_key)[GETPOS(SALT_SIZE + i, index)];
	out[i] = 0;
	return out;
#else
	return saved_key[index];
#endif
}
/* Extract the 8 salt characters that follow the leading length byte.
   The static union keeps the buffer word-aligned (see SALT_ALIGN) and
   valid after return. */
static void *get_salt(char *ciphertext)
{
	static union {
		unsigned char c[SALT_SIZE];
		uint32_t w;
	} out;
	int i;

	for (i = 0; i < SALT_SIZE; i++)
		out.c[i] = ciphertext[i + 1];
	return (void*)out.c;
}
/* Install the current salt. SIMD path writes the 8 salt bytes into the
   head of every candidate's interleaved SHA-1 input buffer; scalar path
   just keeps a copy for crypt_all(). */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	int i, index;

	for (index = 0; index < kpc; index++)
		for (i = 0; i < SALT_SIZE; i++)
			saved_key[0][GETPOS(i, index)] =
				((unsigned char*)salt)[i];
#else
	memcpy(saved_salt, salt, SALT_SIZE);
#endif
}
/* Quick scan: does the first 32-bit word of the target digest match any
   computed digest? Full comparison is deferred to cmp_one(). */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int x, y=0;

	/* Digests are stored interleaved: lane x of group y, 5 words apart. */
	for (; y < kpc/SIMD_COEF_32; y++)
		for (x = 0; x < SIMD_COEF_32; x++)
		{
			if (((uint32_t*)binary)[0] ==
			    ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5])
				return 1;
		}
	return 0;
#else
	int index = 0;

	/* Without OpenMP, crypt_all only filled index 0. */
#ifdef _OPENMP
	for (index = 0; index < count; index++)
#endif
		if (((uint32_t*)binary)[0] == crypt_key[index][0])
			return 1;
	return 0;
#endif
}
/* Full 20-byte digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int x, y;

	/* Locate lane x in interleaved group y, then compare all 5 words. */
	x = index & (SIMD_COEF_32-1);
	y = (unsigned int)index / SIMD_COEF_32;

	if (((uint32_t*)binary)[0] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5])
		return 0;
	if (((uint32_t*)binary)[1] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*1])
		return 0;
	if (((uint32_t*)binary)[2] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*2])
		return 0;
	if (((uint32_t*)binary)[3] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*3])
		return 0;
	if (((uint32_t*)binary)[4] != ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32*5+SIMD_COEF_32*4])
		return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
/* cmp_one() already compared the full digest, so there is nothing further
   to verify here; always report a match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Hash all queued candidates: SHA1(salt_hex || password || NUL). The SIMD
   path processes NBKEYS keys per call to the vectorized SHA-1 body; the
   scalar path hashes one key at a time with OpenSSL-style SHA_CTX. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
	/* Each OpenMP iteration handles one SIMD group of MAX_KEYS_PER_CRYPT
	   keys (or one key in the scalar build). */
	int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;

#pragma omp parallel for
	for (index = 0; index < loops; ++index)
#endif
	{
#ifdef SIMD_COEF_32
		SIMDSHA1body(saved_key[index], (unsigned int*)crypt_key[index], NULL, SSEi_MIXED_IN);
#else
		SHA_CTX ctx;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, (unsigned char*)saved_salt, SALT_SIZE);
		/* strlen + 1: the password's trailing NUL is hashed too. */
		SHA1_Update(&ctx, (unsigned char*)saved_key[index], strlen(saved_key[index]) + 1);
		SHA1_Final((unsigned char*)crypt_key[index], &ctx);
#endif
	}
	return count;
}
#ifdef SIMD_COEF_32
/* Index of candidate `index`'s first digest word in the interleaved
   crypt_key layout (5 words per digest, SIMD_COEF_32 lanes per group). */
#define HASH_IDX ((index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*5)

/* Hash-table lookups at increasing mask widths, from the digest's first
   32-bit word. */
static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[HASH_IDX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif
/* Bucket a salt by its first 32-bit word (salt is SALT_ALIGN'd). */
static int salt_hash(void *salt)
{
	return *(uint32_t*)salt & (SALT_HASH_SIZE - 1);
}
/* Format descriptor wiring this plugin's parameters and methods into the
   John the Ripper core. */
struct fmt_main fmt_ctrxns = {
	{
		/* params: labels, limits, and capability flags. */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* FMT_OMP_BAD: OpenMP scaling is known to be poor here. */
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		{ NULL },
		tests
	}, {
		/* methods: lifecycle, parsing, hashing, and comparison. */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
atomic-4.c | /* PR middle-end/35611 */
extern void abort (void);
/* Regression test for PR middle-end/35611: 1000 concurrent "omp atomic"
   updates of a long double must sum exactly; aborts on miscompilation. */
int
main (void)
{
  long double d = .0L;
  int i;

#pragma omp parallel for shared (d)
  for (i = 0; i < 1000; i++)
#pragma omp atomic
    d += 1.0L;

  if (d != 1000.0L)
    abort ();
  return 0;
}
|
omp_bug1fix.c | /******************************************************************************
* FILE: omp_bug1fix.c
* DESCRIPTION:
* This is a corrected version of the omp_bug1.c example. Corrections
* include removing all statements between the parallel for construct and
* the actual for loop, and introducing logic to preserve the ability to
* query a thread's id and print it from inside the for loop.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
#define CHUNKSIZE 5
/* Parallel vector add c = a + b, printing each thread's id. first_time is
   firstprivate so every thread starts with 'y' and caches its own id into
   tid exactly once, on its first iteration. */
int main (int argc, char *argv[])
{
  int i, chunk, tid;
  float a[N], b[N], c[N];
  char first_time;

  /* Some initializations */
  for (i=0; i < N; i++)
    a[i] = b[i] = i * 1.0;
  chunk = CHUNKSIZE;
  first_time = 'y';

#pragma omp parallel for \
  shared(a,b,c,chunk) \
  private(i,tid) \
  schedule(static,chunk) \
  firstprivate(first_time)

  for (i=0; i < N; i++) {
    if (first_time == 'y') {
      tid = omp_get_thread_num();
      first_time = 'n';
    }
    c[i] = a[i] + b[i];
    printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
  }
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
int main(void) {
check_offloading();
double A[N], B[N], C[N], D[N], E[N];
int fail = 0;
INIT();
// **************************
// Series 1: no dist_schedule
// **************************
//
// Test: #iterations == #teams
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(512)
#pragma omp distribute simd
for (int i = 0 ; i < 512 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 512 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations > #teams
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(256)
#pragma omp distribute simd
for (int i = 0 ; i < 500 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 500 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations < #teams
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(256)
#pragma omp distribute simd
for (int i = 0 ; i < 123 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 123 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// ****************************
// Series 2: with dist_schedule
// ****************************
//
// Test: #iterations == #teams, dist_schedule(1)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(512)
#pragma omp distribute simd dist_schedule(static,1)
for (int i = 0 ; i < 512 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 512 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations == #teams, dist_schedule(#iterations)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(512)
#pragma omp distribute simd dist_schedule(static,512)
for (int i = 0 ; i < 512 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 512 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations == #teams, dist_schedule(#iterations/10), variable chunk size
//
ZERO(A);
int ten = 10;
int chunkSize = 512/ten;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(512)
#pragma omp distribute simd dist_schedule(static,chunkSize)
for (int i = 0 ; i < 512 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 512 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations > #teams, dist_schedule(1)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(256)
#pragma omp distribute simd dist_schedule(static,1)
for (int i = 0 ; i < 500 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 500 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations > #teams, dist_schedule(#iterations)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(256)
#pragma omp distribute simd dist_schedule(static,500)
for (int i = 0 ; i < 500 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 500 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations > #teams, dist_schedule(#iterations/10), variable chunk size
//
ZERO(A);
ten = 10;
chunkSize = 500/ten;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(256)
#pragma omp distribute simd dist_schedule(static,chunkSize)
for (int i = 0 ; i < 500 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 500 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations < #teams, dist_schedule(1)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(256)
#pragma omp distribute simd dist_schedule(static,1)
for (int i = 0 ; i < 123 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 123 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations < #teams, dist_schedule(#iterations)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(256)
#pragma omp distribute simd dist_schedule(static,123)
for (int i = 0 ; i < 123 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 123 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations < #teams, dist_schedule(#iterations)
//
ZERO(A);
ten = 10;
chunkSize = 123/ten;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(256)
#pragma omp distribute simd dist_schedule(static,chunkSize)
for (int i = 0 ; i < 123 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 123 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// ****************************
// Series 3: with ds attributes
// ****************************
//
// Test: private
//
ZERO(A); ZERO(B);
double p = 2.0, q = 4.0;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams num_teams(256)
{
#pragma omp distribute simd private(p,q)
for(int i = 0 ; i < N ; i++) {
p = 2;
q = 3;
A[i] += p;
B[i] += q;
}
}
}
for(int i = 0 ; i < N ; i++) {
if (A[i] != TRIALS*2) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]);
fail = 1;
}
if (B[i] != TRIALS*3) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: firstprivate
//
ZERO(A); ZERO(B);
p = 2.0, q = 4.0;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation
#pragma omp teams num_teams(64)
{
#pragma omp distribute simd firstprivate(p,q)
for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team
p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team)
q += 7.0;
A[i] += p;
B[i] += q;
}
}
}
for(int i = 0 ; i < 128 ; i++) {
if (i % 2 == 0) {
if (A[i] != (2.0+3.0)*TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
fail = 1;
}
if (B[i] != (4.0+7.0)*TRIALS) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]);
fail = 1;
}
} else {
if (A[i] != (2.0+3.0*2)*TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]);
fail = 1;
}
if (B[i] != (4.0+7.0*2)*TRIALS) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]);
fail = 1;
}
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: lastprivate
//
int lastpriv = -1;
#pragma omp target map(tofrom:lastpriv)
#pragma omp teams num_teams(10)
#pragma omp distribute simd lastprivate(lastpriv)
for(int i = 0 ; i < omp_get_num_teams() ; i++)
lastpriv = omp_get_team_num();
if(lastpriv != 9) {
printf("lastpriv value is %d and should have been %d\n", lastpriv, 9);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// **************************
// Series 4: collapse
// **************************
//
// Test: 2 loops
//
double * S = malloc(N*N*sizeof(double));
double * T = malloc(N*N*sizeof(double));
double * U = malloc(N*N*sizeof(double));
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
{
S[i*N+j] = 0.0;
T[i*N+j] = 1.0;
U[i*N+j] = 2.0;
}
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N])
#pragma omp teams num_teams(512)
#pragma omp distribute simd collapse(2)
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t
}
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
if (S[i*N+j] != TRIALS*3.0) {
printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: 3 loops
//
int M = N/8;
double * V = malloc(M*M*M*sizeof(double));
double * Z = malloc(M*M*M*sizeof(double));
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
{
V[i*M*M+j*M+k] = 2.0;
Z[i*M*M+j*M+k] = 3.0;
}
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M])
#pragma omp teams num_teams(512)
#pragma omp distribute simd collapse(3)
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t
}
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) {
printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
return 0;
}
|
GB_unop__lnot_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_int16_int16)
// op(A') function: GB (_unop_tran__lnot_int16_int16)
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = !(aij != 0)
// type of the entries of the input matrix A
#define GB_ATYPE \
    int16_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the input's nonzero-ness
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (a no-op here: A and C are both int16_t)
#define GB_CAST(z, aij) \
    int16_t z = aij ;

// cij = op (aij): load, cast, apply, and store in one step
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = aij ; \
    Cx [pC] = !(z != 0) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = lnot (Ax [p]) for all p in [0, anz): element-wise logical NOT.
// Returns GrB_NO_VALUE when the operator/type is compile-time disabled, so
// the caller falls back to the generic (non-specialized) apply kernel.
GrB_Info GB (_unop_apply__lnot_int16_int16)
(
    int16_t *Cx,               // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap; NULL otherwise
    int64_t anz,               // number of entries to process
    int nthreads               // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hyper/full case: every position in Ax holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only positions whose bitmap flag is set are computed
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (A'): transpose A and apply the logical-NOT operator.  The body is
// the shared transpose template, which expands using the GB_* macros defined
// above (GB_GETA / GB_CAST_OP etc.) for this type/operator combination.
GrB_Info GB (_unop_tran__lnot_int16_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-thread workspaces for the bucket transpose
    const int64_t *restrict A_slice, // how A's vectors are partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolutiondepthwise_3x3_packn_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, fp16 storage/arithmetic, RVV packn layout.
// Each channel group holds `packn` interleaved fp16 lanes (packn = VLEN bytes / 2,
// i.e. one e16m1 vector register).  The kernel processes 2 output rows x 2 output
// columns per inner iteration, with scalar-column and scalar-row tails.
// Assumes the input has been padded so that w == outw + 2 — TODO confirm at caller.
static void convdw3x3s1_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;       // fp16 lanes per vector register
    const word_type vl = vsetvl_e16m1(packn); // fixed vector length for all ops below

    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const __fp16* bias = _bias;

    // one channel group per OpenMP task; groups are independent
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // bias vector for this group, or zeros if no bias was given
        vfloat16m1_t _bias0 = bias ? vle16_v_f16m1(bias + g * packn, vl) : vfmv_v_f_f16m1((__fp16)0.f, vl);

        const __fp16* k0 = kernel.row<const __fp16>(g);

        __fp16* outptr0 = out.row<__fp16>(0);
        __fp16* outptr1 = out.row<__fp16>(1);

        const Mat img0 = bottom_blob.channel(g);

        // four input rows: rows i..i+3 feed output rows i and i+1
        const __fp16* r0 = img0.row<const __fp16>(0);
        const __fp16* r1 = img0.row<const __fp16>(1);
        const __fp16* r2 = img0.row<const __fp16>(2);
        const __fp16* r3 = img0.row<const __fp16>(3);

        // the nine 3x3 kernel taps, one vector (packn lanes) each
        vfloat16m1_t _k00 = vle16_v_f16m1(k0, vl);
        vfloat16m1_t _k01 = vle16_v_f16m1(k0 + packn, vl);
        vfloat16m1_t _k02 = vle16_v_f16m1(k0 + packn * 2, vl);
        vfloat16m1_t _k10 = vle16_v_f16m1(k0 + packn * 3, vl);
        vfloat16m1_t _k11 = vle16_v_f16m1(k0 + packn * 4, vl);
        vfloat16m1_t _k12 = vle16_v_f16m1(k0 + packn * 5, vl);
        vfloat16m1_t _k20 = vle16_v_f16m1(k0 + packn * 6, vl);
        vfloat16m1_t _k21 = vle16_v_f16m1(k0 + packn * 7, vl);
        vfloat16m1_t _k22 = vle16_v_f16m1(k0 + packn * 8, vl);

        int i = 0;
        // main body: two output rows at a time
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            // two output columns at a time: _sum{row}{col}
            for (; j + 1 < outw; j += 2)
            {
                vfloat16m1_t _sum00 = _bias0;
                vfloat16m1_t _sum01 = _bias0;
                vfloat16m1_t _sum10 = _bias0;
                vfloat16m1_t _sum11 = _bias0;

                // row r0 contributes only to output row 0 (kernel row 0)
                vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);

                _sum00 = vfmacc_vv_f16m1(_sum00, _k00, _r00, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k01, _r01, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k02, _r02, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k00, _r01, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k01, _r02, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k02, _r03, vl);

                // row r1 is kernel row 1 for output row 0 and kernel row 0 for output row 1
                vfloat16m1_t _r10 = vle16_v_f16m1(r1, vl);
                vfloat16m1_t _r11 = vle16_v_f16m1(r1 + packn, vl);
                vfloat16m1_t _r12 = vle16_v_f16m1(r1 + packn * 2, vl);
                vfloat16m1_t _r13 = vle16_v_f16m1(r1 + packn * 3, vl);

                _sum00 = vfmacc_vv_f16m1(_sum00, _k10, _r10, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k11, _r11, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k12, _r12, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k10, _r11, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k11, _r12, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k12, _r13, vl);
                _sum10 = vfmacc_vv_f16m1(_sum10, _k00, _r10, vl);
                _sum10 = vfmacc_vv_f16m1(_sum10, _k01, _r11, vl);
                _sum10 = vfmacc_vv_f16m1(_sum10, _k02, _r12, vl);
                _sum11 = vfmacc_vv_f16m1(_sum11, _k00, _r11, vl);
                _sum11 = vfmacc_vv_f16m1(_sum11, _k01, _r12, vl);
                _sum11 = vfmacc_vv_f16m1(_sum11, _k02, _r13, vl);

                // row r2 is kernel row 2 for output row 0 and kernel row 1 for output row 1
                vfloat16m1_t _r20 = vle16_v_f16m1(r2, vl);
                vfloat16m1_t _r21 = vle16_v_f16m1(r2 + packn, vl);
                vfloat16m1_t _r22 = vle16_v_f16m1(r2 + packn * 2, vl);
                vfloat16m1_t _r23 = vle16_v_f16m1(r2 + packn * 3, vl);

                _sum00 = vfmacc_vv_f16m1(_sum00, _k20, _r20, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k21, _r21, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k22, _r22, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k20, _r21, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k21, _r22, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k22, _r23, vl);
                _sum10 = vfmacc_vv_f16m1(_sum10, _k10, _r20, vl);
                _sum10 = vfmacc_vv_f16m1(_sum10, _k11, _r21, vl);
                _sum10 = vfmacc_vv_f16m1(_sum10, _k12, _r22, vl);
                _sum11 = vfmacc_vv_f16m1(_sum11, _k10, _r21, vl);
                _sum11 = vfmacc_vv_f16m1(_sum11, _k11, _r22, vl);
                _sum11 = vfmacc_vv_f16m1(_sum11, _k12, _r23, vl);

                // row r3 contributes only to output row 1 (kernel row 2)
                vfloat16m1_t _r30 = vle16_v_f16m1(r3, vl);
                vfloat16m1_t _r31 = vle16_v_f16m1(r3 + packn, vl);
                vfloat16m1_t _r32 = vle16_v_f16m1(r3 + packn * 2, vl);
                vfloat16m1_t _r33 = vle16_v_f16m1(r3 + packn * 3, vl);

                _sum10 = vfmacc_vv_f16m1(_sum10, _k20, _r30, vl);
                _sum10 = vfmacc_vv_f16m1(_sum10, _k21, _r31, vl);
                _sum10 = vfmacc_vv_f16m1(_sum10, _k22, _r32, vl);
                _sum11 = vfmacc_vv_f16m1(_sum11, _k20, _r31, vl);
                _sum11 = vfmacc_vv_f16m1(_sum11, _k21, _r32, vl);
                _sum11 = vfmacc_vv_f16m1(_sum11, _k22, _r33, vl);

                vse16_v_f16m1(outptr0, _sum00, vl);
                vse16_v_f16m1(outptr0 + packn, _sum01, vl);
                vse16_v_f16m1(outptr1, _sum10, vl);
                vse16_v_f16m1(outptr1 + packn, _sum11, vl);

                outptr0 += packn * 2;
                outptr1 += packn * 2;

                r0 += packn * 2;
                r1 += packn * 2;
                r2 += packn * 2;
                r3 += packn * 2;
            }
            // column tail: one output column, still two output rows
            for (; j < outw; j++)
            {
                vfloat16m1_t _sum0 = _bias0;
                vfloat16m1_t _sum1 = _bias0;

                vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);

                _sum0 = vfmacc_vv_f16m1(_sum0, _k00, _r00, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k01, _r01, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k02, _r02, vl);

                vfloat16m1_t _r10 = vle16_v_f16m1(r1, vl);
                vfloat16m1_t _r11 = vle16_v_f16m1(r1 + packn, vl);
                vfloat16m1_t _r12 = vle16_v_f16m1(r1 + packn * 2, vl);

                _sum0 = vfmacc_vv_f16m1(_sum0, _k10, _r10, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k11, _r11, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k12, _r12, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k00, _r10, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k01, _r11, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k02, _r12, vl);

                vfloat16m1_t _r20 = vle16_v_f16m1(r2, vl);
                vfloat16m1_t _r21 = vle16_v_f16m1(r2 + packn, vl);
                vfloat16m1_t _r22 = vle16_v_f16m1(r2 + packn * 2, vl);

                _sum0 = vfmacc_vv_f16m1(_sum0, _k20, _r20, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k21, _r21, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k22, _r22, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k10, _r20, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k11, _r21, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k12, _r22, vl);

                vfloat16m1_t _r30 = vle16_v_f16m1(r3, vl);
                vfloat16m1_t _r31 = vle16_v_f16m1(r3 + packn, vl);
                vfloat16m1_t _r32 = vle16_v_f16m1(r3 + packn * 2, vl);

                _sum1 = vfmacc_vv_f16m1(_sum1, _k20, _r30, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k21, _r31, vl);
                _sum1 = vfmacc_vv_f16m1(_sum1, _k22, _r32, vl);

                vse16_v_f16m1(outptr0, _sum0, vl);
                vse16_v_f16m1(outptr1, _sum1, vl);

                outptr0 += packn;
                outptr1 += packn;

                r0 += packn;
                r1 += packn;
                r2 += packn;
                r3 += packn;
            }

            // advance input pointers by the 2-pixel row overhang plus one full
            // row (two input rows consumed per pair of output rows);
            // outptr0/outptr1 each skip the row the other pointer just wrote
            r0 += 2 * packn + w * packn;
            r1 += 2 * packn + w * packn;
            r2 += 2 * packn + w * packn;
            r3 += 2 * packn + w * packn;

            outptr0 += outw * packn;
            outptr1 += outw * packn;
        }
        // row tail: one remaining output row (uses r0..r2 only)
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j + 1 < outw; j += 2)
            {
                vfloat16m1_t _sum00 = _bias0;
                vfloat16m1_t _sum01 = _bias0;

                vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);

                _sum00 = vfmacc_vv_f16m1(_sum00, _k00, _r00, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k01, _r01, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k02, _r02, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k00, _r01, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k01, _r02, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k02, _r03, vl);

                vfloat16m1_t _r10 = vle16_v_f16m1(r1, vl);
                vfloat16m1_t _r11 = vle16_v_f16m1(r1 + packn, vl);
                vfloat16m1_t _r12 = vle16_v_f16m1(r1 + packn * 2, vl);
                vfloat16m1_t _r13 = vle16_v_f16m1(r1 + packn * 3, vl);

                _sum00 = vfmacc_vv_f16m1(_sum00, _k10, _r10, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k11, _r11, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k12, _r12, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k10, _r11, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k11, _r12, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k12, _r13, vl);

                vfloat16m1_t _r20 = vle16_v_f16m1(r2, vl);
                vfloat16m1_t _r21 = vle16_v_f16m1(r2 + packn, vl);
                vfloat16m1_t _r22 = vle16_v_f16m1(r2 + packn * 2, vl);
                vfloat16m1_t _r23 = vle16_v_f16m1(r2 + packn * 3, vl);

                _sum00 = vfmacc_vv_f16m1(_sum00, _k20, _r20, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k21, _r21, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k22, _r22, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k20, _r21, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k21, _r22, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k22, _r23, vl);

                vse16_v_f16m1(outptr0, _sum00, vl);
                vse16_v_f16m1(outptr0 + packn, _sum01, vl);

                outptr0 += packn * 2;

                r0 += packn * 2;
                r1 += packn * 2;
                r2 += packn * 2;
            }
            for (; j < outw; j++)
            {
                vfloat16m1_t _sum0 = _bias0;

                vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);

                _sum0 = vfmacc_vv_f16m1(_sum0, _k00, _r00, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k01, _r01, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k02, _r02, vl);

                vfloat16m1_t _r10 = vle16_v_f16m1(r1, vl);
                vfloat16m1_t _r11 = vle16_v_f16m1(r1 + packn, vl);
                vfloat16m1_t _r12 = vle16_v_f16m1(r1 + packn * 2, vl);

                _sum0 = vfmacc_vv_f16m1(_sum0, _k10, _r10, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k11, _r11, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k12, _r12, vl);

                vfloat16m1_t _r20 = vle16_v_f16m1(r2, vl);
                vfloat16m1_t _r21 = vle16_v_f16m1(r2 + packn, vl);
                vfloat16m1_t _r22 = vle16_v_f16m1(r2 + packn * 2, vl);

                _sum0 = vfmacc_vv_f16m1(_sum0, _k20, _r20, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k21, _r21, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k22, _r22, vl);

                vse16_v_f16m1(outptr0, _sum0, vl);

                outptr0 += packn;

                r0 += packn;
                r1 += packn;
                r2 += packn;
            }

            // skip the 2-pixel row overhang to reach the next input row
            r0 += 2 * packn;
            r1 += 2 * packn;
            r2 += 2 * packn;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, fp16 storage/arithmetic, RVV packn layout.
// One output row at a time; 2 output columns per inner iteration (consuming 4
// input columns), with a scalar-column tail.  See convdw3x3s1_packn_fp16sa_rvv
// for the data layout (packn fp16 lanes per channel group).
static void convdw3x3s2_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;       // fp16 lanes per vector register
    const word_type vl = vsetvl_e16m1(packn); // fixed vector length for all ops below

    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // after a row: remaining pixels past the last window (w - 2*outw) plus one
    // whole skipped input row (stride 2 advances two input rows per output row)
    const int tailstep = (w - 2 * outw + w) * packn;

    const __fp16* bias = _bias;

    // one channel group per OpenMP task; groups are independent
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // bias vector for this group, or zeros if no bias was given
        vfloat16m1_t _bias0 = bias ? vle16_v_f16m1(bias + g * packn, vl) : vfmv_v_f_f16m1((__fp16)0.f, vl);

        const __fp16* k0 = kernel.row<const __fp16>(g);

        __fp16* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        // three input rows feed one output row
        const __fp16* r0 = img0.row<const __fp16>(0);
        const __fp16* r1 = img0.row<const __fp16>(1);
        const __fp16* r2 = img0.row<const __fp16>(2);

        // the nine 3x3 kernel taps, one vector (packn lanes) each
        vfloat16m1_t _k00 = vle16_v_f16m1(k0, vl);
        vfloat16m1_t _k01 = vle16_v_f16m1(k0 + packn, vl);
        vfloat16m1_t _k02 = vle16_v_f16m1(k0 + packn * 2, vl);
        vfloat16m1_t _k10 = vle16_v_f16m1(k0 + packn * 3, vl);
        vfloat16m1_t _k11 = vle16_v_f16m1(k0 + packn * 4, vl);
        vfloat16m1_t _k12 = vle16_v_f16m1(k0 + packn * 5, vl);
        vfloat16m1_t _k20 = vle16_v_f16m1(k0 + packn * 6, vl);
        vfloat16m1_t _k21 = vle16_v_f16m1(k0 + packn * 7, vl);
        vfloat16m1_t _k22 = vle16_v_f16m1(k0 + packn * 8, vl);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // two output columns per iteration: windows start at columns 0 and 2
            for (; j + 1 < outw; j += 2)
            {
                vfloat16m1_t _sum00 = _bias0;
                vfloat16m1_t _sum01 = _bias0;

                vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);
                vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl);

                _sum00 = vfmacc_vv_f16m1(_sum00, _k00, _r00, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k01, _r01, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k02, _r02, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k00, _r02, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k01, _r03, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k02, _r04, vl);

                vfloat16m1_t _r10 = vle16_v_f16m1(r1, vl);
                vfloat16m1_t _r11 = vle16_v_f16m1(r1 + packn, vl);
                vfloat16m1_t _r12 = vle16_v_f16m1(r1 + packn * 2, vl);
                vfloat16m1_t _r13 = vle16_v_f16m1(r1 + packn * 3, vl);
                vfloat16m1_t _r14 = vle16_v_f16m1(r1 + packn * 4, vl);

                _sum00 = vfmacc_vv_f16m1(_sum00, _k10, _r10, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k11, _r11, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k12, _r12, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k10, _r12, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k11, _r13, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k12, _r14, vl);

                vfloat16m1_t _r20 = vle16_v_f16m1(r2, vl);
                vfloat16m1_t _r21 = vle16_v_f16m1(r2 + packn, vl);
                vfloat16m1_t _r22 = vle16_v_f16m1(r2 + packn * 2, vl);
                vfloat16m1_t _r23 = vle16_v_f16m1(r2 + packn * 3, vl);
                vfloat16m1_t _r24 = vle16_v_f16m1(r2 + packn * 4, vl);

                _sum00 = vfmacc_vv_f16m1(_sum00, _k20, _r20, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k21, _r21, vl);
                _sum00 = vfmacc_vv_f16m1(_sum00, _k22, _r22, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k20, _r22, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k21, _r23, vl);
                _sum01 = vfmacc_vv_f16m1(_sum01, _k22, _r24, vl);

                vse16_v_f16m1(outptr0, _sum00, vl);
                vse16_v_f16m1(outptr0 + packn, _sum01, vl);

                outptr0 += packn * 2;

                // stride 2, two outputs: advance input by 4 columns
                r0 += packn * 4;
                r1 += packn * 4;
                r2 += packn * 4;
            }
            // column tail: one output column
            for (; j < outw; j++)
            {
                vfloat16m1_t _sum0 = _bias0;

                vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);

                _sum0 = vfmacc_vv_f16m1(_sum0, _k00, _r00, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k01, _r01, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k02, _r02, vl);

                vfloat16m1_t _r10 = vle16_v_f16m1(r1, vl);
                vfloat16m1_t _r11 = vle16_v_f16m1(r1 + packn, vl);
                vfloat16m1_t _r12 = vle16_v_f16m1(r1 + packn * 2, vl);

                _sum0 = vfmacc_vv_f16m1(_sum0, _k10, _r10, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k11, _r11, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k12, _r12, vl);

                vfloat16m1_t _r20 = vle16_v_f16m1(r2, vl);
                vfloat16m1_t _r21 = vle16_v_f16m1(r2 + packn, vl);
                vfloat16m1_t _r22 = vle16_v_f16m1(r2 + packn * 2, vl);

                _sum0 = vfmacc_vv_f16m1(_sum0, _k20, _r20, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k21, _r21, vl);
                _sum0 = vfmacc_vv_f16m1(_sum0, _k22, _r22, vl);

                vse16_v_f16m1(outptr0, _sum0, vl);

                outptr0 += packn;

                // stride 2: advance input by 2 columns per output
                r0 += packn * 2;
                r1 += packn * 2;
                r2 += packn * 2;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
FriendAlignFinder.h | ///////////////////////////////////////////////////////////////////////////////
// SOFTWARE COPYRIGHT NOTICE AGREEMENT //
// This software and its documentation are copyright (2012) by the //
// Broad Institute. All rights are reserved. This software is supplied //
// without any warranty or guaranteed support whatsoever. The Broad //
// Institute is not responsible for its use, misuse, or functionality. //
///////////////////////////////////////////////////////////////////////////////
#ifndef FRIEND_ALIGNS_FINDER3_H
#define FRIEND_ALIGNS_FINDER3_H
// MakeDepend: library OMP
// MakeDepend: cflags OMP_FLAGS
#include "paths/long/FriendAligns.h"
#include "paths/long/MakeAlignments.h"
#include "kmers/ReadPather.h"
#include "kmers/ReadPatherDefs.h"
#include "Vec.h"
#include "ParallelVecUtilities.h"
// ====================== FriendAlignFinder implementations =============================
template <int K>
class FriendAlignFinder : public FriendAlignerImpl {
public:
// Explicitly if a read align is actually valid. Used to removed some
// false positives.
bool ValidateAlign( const simple_align_data& a ) const {
typedef bvec::const_iterator Itr;
bvec const& read1 = mReads[a.id1];
bvec read2RC;
bvec const& read2 = a.rc2 ?
read2RC.ReverseComplement(mReads[a.id2]) :
mReads[a.id2];
Itr it1 = read1.begin();
Itr it2 = read2.begin();
if ( a.offset > 0 )
{
ForceAssertLt(static_cast<unsigned>(a.offset),read1.size());
it1 += a.offset;
}
else
{
ForceAssertLt(static_cast<unsigned>(-a.offset),read2.size());
it2 -= a.offset;
}
Itr end = it1 + std::min(std::distance(it1,read1.end()),
std::distance(it2,read2.end()));
bool find_match = false;
while ( !find_match ) {
std::pair<Itr,Itr> mis_locs = mismatch( it1, end, it2 );
if ( mis_locs.first - it1 >= K )
find_match = true;
if ( mis_locs.first == end )
break;
it1 = mis_locs.first + 1;
it2 = mis_locs.second + 1;
}
return find_match;
}
// Data structure to store read location on the unipath
struct ReadLocOnUnipath {
EdgeID uid;
short int start;
unsigned int rid;
bool rc;
friend std::ostream& operator<<( std::ostream& out, const ReadLocOnUnipath& a ) {
char dir = ( a.rc ? '-' : '+' );
out << "loc " << a.rid<< "(" << a.start << "," << ")" << dir << "@" << a.uid.val() ;
return out;
}
friend bool operator<( const ReadLocOnUnipath& l, const ReadLocOnUnipath& r) {
if ( l.uid != r.uid ) return l.uid < r.uid;
return l.start < r.start;
}
};
// We could use more efficient containers. But just use vec of vec for this moment.
typedef vec<ReadLocOnUnipath> PathSegVec;
typedef vec<ReadLocOnUnipath> ReadULocVec;
FriendAlignFinder (const vecbvec& reads, const int max_freq = 1000, Bool use_down_sampling = False, int verb = 1 )
: mReads(reads), mpDict(NULL), mpGraph(NULL),
mCopyNumberMax( max_freq ), mUseDownSampling(use_down_sampling), mVerbose(verb)
{ Init(); }
FriendAlignFinder( const FriendAlignFinder& )=delete;
FriendAlignFinder& operator= ( const FriendAlignFinder& )=delete;
virtual ~FriendAlignFinder () { delete mpGraph; delete mpDict; }
// Find all alignments of one read
virtual void getAligns( size_t readId, Friends* pFriends )
{ vec<ReadLocOnUnipath> locvec;
PathOneRead( readId, &locvec );
// Check for bad alignments
return GetAlignsOneReadUnsorted( readId, locvec, pFriends ); }
private:
void Init( unsigned int coverage = 5, unsigned int nThreads = 0) {
// ========= build the kmer dictionary =========
if ( mVerbose >= 1 )
std::cout << Date() << ": creating dictionary" << std::endl;
size_t dictSize = mReads.SizeSum() / coverage;
mpDict = new KmerDict<K> ( 5*dictSize/4 );
mpDict->process(mReads,mVerbose,false,nThreads,100);
if ( mVerbose >= 1 ) {
std::cout << Date( ) << ": there are " << mpDict->size()
<< " kmers (expected ~" << dictSize << ")" << std::endl;
ReportMemUsage();
}
size_t old_dict_size = mpDict->size();
//mpDict->clean( typename KmerDict<K>::BadKmerCountFunctor(2, mCopyNumberMax));
// the result seems better without using mCopyNumberMax at this stage
mpDict->clean( typename KmerDict<K>::BadKmerCountFunctor(2));
if ( mVerbose >= 1 )
std::cout << Date() << ": Cleaning bad kmers, keeping " << mpDict->size()
<< "(" << ( mpDict->size() * 100 / old_dict_size ) << "%)" << std::endl;
// ========= build the unipath graph =========
mpGraph = new UnipathGraph<K>(*mpDict, mVerbose);
if ( mVerbose >= 1 ) ReportMemUsage();
// ======== Index the read locs on unipaths
GenerateReadLocs();
if ( mVerbose >= 1 ) ReportMemUsage();
}
void GenerateReadLocs( ) {
int64_t total_locs_deleted = 0;
mULocs.clear_and_resize( mpGraph->getNEdges() );
#pragma omp parallel for schedule(dynamic, 100)
for( size_t iread = 0; iread < mReads.size(); iread++ ) {
vec<ReadLocOnUnipath> locvec;
PathOneRead( iread, &locvec );
int num_locs_deleted = 0;
if ( mUseDownSampling )
num_locs_deleted = DownSampleLocsOfOneRead( &locvec );
#pragma omp critical
{
total_locs_deleted += num_locs_deleted;
for ( size_t j = 0; j < locvec.size(); ++j )
mULocs[ locvec[j].uid.val() ].push_back( locvec[j] );
}
}
#pragma omp parallel for schedule(dynamic, 100)
for ( size_t i = 0; i < mULocs.size(); ++i ) {
int ulen = mpGraph->getEdge( EdgeID(i) ).getLength();
if ( ulen < 5 && mULocs[i].isize() > mCopyNumberMax )
mULocs[i].clear();
else
Sort(mULocs[i]);
}
uint64_t total = SizeSum( mULocs );
if ( mVerbose >= 1 )
std::cout << Date() << ": Found " << ToStringAddCommas( total ) << " locs"
<< " after deleting " << ToStringAddCommas(total_locs_deleted) << std::endl;
}
void GetAlignsOneReadUnsorted( size_t read_id,
const vec<ReadLocOnUnipath>& locvec,
Friends *pFriends ) const {
std::set<simple_align_data> uniq_aligns;
for( size_t i = 0; i < locvec.size(); ++i ) {
int nkmer1 = mReads[read_id].size() - K + 1;
const ReadLocOnUnipath& loc1 = locvec[i];
int stop1 = loc1.start + nkmer1;
const ReadULocVec& ulocvec = mULocs[ loc1.uid.val() ];
bool isPalindrome = mpGraph->getEdge(loc1.uid).isPalindrome();
for ( size_t x2 = 0; x2 < ulocvec.size(); ++x2 ) {
const ReadLocOnUnipath& loc2 = ulocvec[x2];
int nkmer2 = mReads[loc2.rid].size() - K + 1;
int stop2 = loc2.start + nkmer2;
if ( loc2.rid == loc1.rid ) continue;
if ( stop2 <= loc1.start ) continue;
if ( loc2.start >= stop1 ) continue;
{ // for all cases
Bool rc = loc2.rc ^ loc1.rc;
int offset2 = ( loc1.rc ? stop1 - stop2 : loc2.start - loc1.start );
simple_align_data a(loc1.rid, loc2.rid, offset2, rc);
uniq_aligns.insert( a );
//if ( ! ValidateAlign(a) ) {
// #pragma omp critical
// {
// std::cout << "Could not validate alignment ";
// std::cout << "read1 on " << loc1.start << "," << stop1
// << " read2 on " << loc2.start << "," << stop2 << std::endl;
// int ulen = mpGraph->getEdge( loc1.uid ).getLength();
// std::cout << "ulen= " << ulen << std::endl;
// }
//}
}
// Special treatmnet of palindrome cases, where the edge consists only
// one kmer, and both orientation of the kmers are the same and should
// all be considered!
if ( isPalindrome ) {
Bool rc = loc2.rc ^ loc1.rc ^ 1;
int stop2p = - loc2.start + 1;
int start2p = - stop2 + 1;
int offset2 = ( loc1.rc ? stop1 - stop2p : start2p - loc1.start );
simple_align_data a(loc1.rid, loc2.rid, offset2, rc);
uniq_aligns.insert( simple_align_data(loc1.rid, loc2.rid, offset2, rc) );
//if ( ! ValidateAlign(a) ) {
// #pragma omp critical
// {
// std::cout << "Could not validate alignment " << a.rc2 << std::endl;
// std::cout << "read1 on " << loc1.start << "," << stop1
// << " read2(palindrom) on " << loc2.start << "," << stop2
// << " reverted to " << start2p << "," << stop2p
// << std::endl;
// int ulen = mpGraph->getEdge( loc1.uid ).getLength();
// std::cout << "ulen= " << ulen << std::endl;
// }
//}
}
}
}
pFriends->clear();
int n_false_align = 0;
for( std::set<simple_align_data>::iterator it = uniq_aligns.begin(), end = uniq_aligns.end();
it != end; it++ ) {
if ( ValidateAlign( *it ) )
pFriends->push_back( Friend(it->id2,it->offset,it->rc2) );
else
n_false_align++;
}
}
// Pathing provided the read head on the unipaths graph
void PathOneRead ( size_t read_id, PathSegVec *loc_vec ) const {
std::set<ReadLocOnUnipath> locs;
const bvec& read = mReads[ read_id ];
int readLen = read.size();
int nkmers = readLen - K + 1;
if ( nkmers < 0 ) return;
// pathing
for ( int rpos = 0; rpos < nkmers; rpos++ ) {
KMer<K> kmer( read.begin() + rpos );
KDef const* pDef = mpDict->lookup(kmer);
if ( !pDef ) { continue; }
EdgeID edgeID = pDef->getEdgeID();
const UnipathEdge *pEdge = &mpGraph->getEdge(edgeID);
KmerID kmerID = pEdge->getKmerID( pDef->getEdgeOffset() );
bool rc = IsRC( kmer,kmerID );
// number of skipped bases from the unipath
int skipped = kmerID.val() - pEdge->getInitialKmerID().val();
short ustart = ( rc ? skipped - (nkmers-1 - rpos) : skipped - rpos );
ReadLocOnUnipath the_loc =
{ edgeID, ustart, static_cast<unsigned>(read_id), rc };
locs.insert( the_loc );
}
(*loc_vec).assign( locs.begin(), locs.end() );
}
// Thin out the unipath locations ("path segments") of a single read so that
// every ~10-kmer division of the read keeps at least kTargetDivCoverage
// covering segments, preferring the longest unipaths.  Long unipaths
// (>= kGoodUnipathLen kmers) are always kept.
// Returns the number of segments removed from *loc_vec.
int DownSampleLocsOfOneRead( PathSegVec *loc_vec ) const {
    size_t nsegs = (*loc_vec).size();
    if ( nsegs < 2 ) return 0;                    // nothing to down-sample
    // Number of kmers in the read (all segments belong to the same read).
    int nkmers = mReads[ (*loc_vec)[0].rid ].size() -K + 1;
    // For each segment: the half-open kmer interval [rstart, rstop) of the
    // read it covers, plus the unipath length in kmers.
    vec< std::pair<int,int> > seg_coverage(nsegs);
    vec<int> seg_lens(nsegs);
    for ( size_t i = 0; i < nsegs; ++i ) {
        int ulen = mpGraph->getEdge( (*loc_vec)[i].uid ).getLength();
        int rstart = -1, rstop = -1;
        if ( ! (*loc_vec)[i].rc ) {
            // Forward placement: .start is the read offset of the unipath
            // (negative when the unipath starts before the read).
            rstart = std::max( -(*loc_vec)[i].start, 0 );
            rstop = std::min( rstart + ulen , nkmers );
        }
        else {
            // Reverse-complement placement; coordinates mirrored on the read.
            rstart = std::max( (*loc_vec)[i].start + nkmers - ulen, 0 );
            rstop = std::min( rstart + ulen , nkmers );
        }
        seg_coverage[i] = std::make_pair(rstart, rstop);
        seg_lens[i] = ulen;
    }
    // Select the segments, starting from the largest until every 10-base
    // division in the read has enough coverage. Long unipaths are always
    // kept.
    const int kDivisionSize = 10;       // kmers per coverage division
    const int kTargetDivCoverage = 1;   // required segments per division
    const int kGoodUnipathLen = 5;      // unipaths this long are always kept
    vec<Bool> todel( nsegs, true);      // assume delete; cleared when selected
    vec<int> seg_indices( nsegs, vec<int>::IDENTITY );
    // Sort segment lengths descending, permuting seg_indices in sync so we
    // can visit segments longest-first while indexing the original arrays.
    ReverseSortSync( seg_lens, seg_indices );
    vec<int> times_covered( (nkmers-1)/kDivisionSize + 1, 0);
    // Ignore a short trailing division (< 10 kmers) when testing coverage.
    bool ignore_tail_division = ( times_covered.size() * kDivisionSize - nkmers < 10 ) ;
    for ( size_t i = 0; i < nsegs; ++i ) {
        size_t seg_index = seg_indices[i];
        //// discard redundant segments ( this division it covers all has enough segments )
        //bool is_redundant = true;
        //for( int j = seg_coverage[seg_index].first / kDivisionSize;
        //     j <= (seg_coverage[seg_index].second-1) / kDivisionSize; ++j )
        //    if ( times_covered[j] < kTargetDivCoverage ) {
        //        is_redundant = false;
        //        break;
        //    }
        //if ( seg_lens[i] < kGoodUnipathLen && ! is_redundant
        //        || seg_lens[i] >= kGoodUnipathLen ) {
        // Keep this segment and mark every division it touches as covered.
        for( int j = seg_coverage[seg_index].first / kDivisionSize;
             j <= (seg_coverage[seg_index].second-1) / kDivisionSize; ++j )
            times_covered[j]++;
        todel[seg_index] = false;
        //}
        // Are all divisions covered?
        bool is_well_covered = true;
        size_t div_end = ( ignore_tail_division ? times_covered.size() -1 : times_covered.size() );
        for ( size_t j = 0; j < div_end; ++j ) {
            if ( times_covered[j] < kTargetDivCoverage) {
                is_well_covered = false;
                break;
            }
        }
        // exit conditions: stop only once coverage is satisfied AND we have
        // reached the short unipaths (long ones are always kept).
        if ( is_well_covered && seg_lens[i] < kGoodUnipathLen ) { break; }
    }
    // return values
    EraseIf( *loc_vec, todel );
    return nsegs - (*loc_vec).size();
}
// True when the graph stores kmerID as the reverse complement of kmer
// (i.e. the stored bases do not match kmer's forward orientation).
bool IsRC( KMer<K> const& kmer, KmerID const& kmerID ) const {
    HugeBVec::const_iterator baseItr( mpGraph->getBases( kmerID ) );
    bool const isForward = std::equal( kmer.begin(), kmer.end(), baseItr );
    // If it is not the forward orientation, it must be the RC orientation.
    Assert( isForward || std::equal( kmer.rcbegin(), kmer.rcend(), baseItr ) );
    return !isForward;
}
// Log the process's peak memory usage (in GB) to stdout, timestamped.
void ReportMemUsage() {
    double const peakGB = PeakMemUsageBytes( ) / 1000000000.0;
    std::cout << Date() << ": Peak memory use = " << peakGB
              << std::resetiosflags(std::ios::fixed) << " GB" << std::endl;
}
private:
const vecbvec &mReads;
KmerDict<K> *mpDict;
UnipathGraph<K> *mpGraph;
vec<ReadULocVec> mULocs; // read path seg on unipaths, indexed by unipaths id
// sorted by the starting positon on unipath
int mCopyNumberMax; // Ignore short unipath with high copy number
Bool mUseDownSampling;
int mVerbose;
};
#endif
|
nvptx_target_printf_codegen.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// expected-no-diagnostics
extern int printf(const char *, ...);
// Check a simple call to printf end-to-end.
int CheckSimple() {
#pragma omp target
  {
    // printf in master-only basic block: expect lowering to __llvm_omp_vprintf with a packed %printf_args struct.
    const char* fmt = "%d %lld %f";
    printf(fmt, 1, 2ll, 3.0);
  }
  return 0;
}
void CheckNoArgs() {
#pragma omp target
  {
    // printf with no varargs: expect __llvm_omp_vprintf called with a null args buffer and size 0.
    printf("hello, world!");
  }
}
// Check that the alloca for printf's argument struct is emitted in the kernel's
// entry block, not inside the conditional `if` block that contains the call.
int foo;
void CheckAllocaIsInEntryBlock() {
#pragma omp target
  {
    if (foo) {
      // The %printf_args alloca for this call must still land in the entry block.
      printf("%d", 42);
    }
  }
}
//
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckSimple_l13
// CHECK-64-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[FMT:%.*]] = alloca i8*, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS:%.*]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i64 0, i64 0), i8** [[FMT]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load i8*, i8** [[FMT]], align 8
// CHECK-64-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 0
// CHECK-64-NEXT: store i32 1, i32* [[TMP2]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 1
// CHECK-64-NEXT: store i64 2, i64* [[TMP3]], align 8
// CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 2
// CHECK-64-NEXT: store double 3.000000e+00, double* [[TMP4]], align 8
// CHECK-64-NEXT: [[TMP5:%.*]] = bitcast %printf_args* [[TMP]] to i8*
// CHECK-64-NEXT: [[TMP6:%.*]] = call i32 @__llvm_omp_vprintf(i8* [[TMP1]], i8* [[TMP5]], i32 24)
// CHECK-64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-64-NEXT: ret void
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckNoArgs_l25
// CHECK-64-SAME: () #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str1, i64 0, i64 0), i8* null, i32 0)
// CHECK-64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-64-NEXT: ret void
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckAllocaIsInEntryBlock_l36
// CHECK-64-SAME: (i64 [[FOO:%.*]]) #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[FOO_ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS_0:%.*]], align 8
// CHECK-64-NEXT: store i64 [[FOO]], i64* [[FOO_ADDR]], align 8
// CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[FOO_ADDR]] to i32*
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK-64-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK-64-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK-64: if.then:
// CHECK-64-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS_0]], %printf_args.0* [[TMP]], i32 0, i32 0
// CHECK-64-NEXT: store i32 42, i32* [[TMP2]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = bitcast %printf_args.0* [[TMP]] to i8*
// CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str2, i64 0, i64 0), i8* [[TMP3]], i32 4)
// CHECK-64-NEXT: br label [[IF_END]]
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
// CHECK-64: if.end:
// CHECK-64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-64-NEXT: ret void
//
//
//
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckSimple_l13
// CHECK-32-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[FMT:%.*]] = alloca i8*, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS:%.*]], align 8
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: store i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8** [[FMT]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load i8*, i8** [[FMT]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 0
// CHECK-32-NEXT: store i32 1, i32* [[TMP2]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 1
// CHECK-32-NEXT: store i64 2, i64* [[TMP3]], align 8
// CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[PRINTF_ARGS]], %printf_args* [[TMP]], i32 0, i32 2
// CHECK-32-NEXT: store double 3.000000e+00, double* [[TMP4]], align 8
// CHECK-32-NEXT: [[TMP5:%.*]] = bitcast %printf_args* [[TMP]] to i8*
// CHECK-32-NEXT: [[TMP6:%.*]] = call i32 @__llvm_omp_vprintf(i8* [[TMP1]], i8* [[TMP5]], i32 24)
// CHECK-32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-32-NEXT: ret void
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckNoArgs_l25
// CHECK-32-SAME: () #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str1, i32 0, i32 0), i8* null, i32 0)
// CHECK-32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-32-NEXT: ret void
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_CheckAllocaIsInEntryBlock_l36
// CHECK-32-SAME: (i32 [[FOO:%.*]]) #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[FOO_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca [[PRINTF_ARGS_0:%.*]], align 8
// CHECK-32-NEXT: store i32 [[FOO]], i32* [[FOO_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[FOO_ADDR]], align 4
// CHECK-32-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0
// CHECK-32-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK-32: if.then:
// CHECK-32-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[PRINTF_ARGS_0]], %printf_args.0* [[TMP]], i32 0, i32 0
// CHECK-32-NEXT: store i32 42, i32* [[TMP2]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = bitcast %printf_args.0* [[TMP]] to i8*
// CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__llvm_omp_vprintf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str2, i32 0, i32 0), i8* [[TMP3]], i32 4)
// CHECK-32-NEXT: br label [[IF_END]]
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
// CHECK-32: if.end:
// CHECK-32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-32-NEXT: ret void
//
|
GB_unop__signum_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__signum_fp64_fp64)
// op(A') function: GB (_unop_tran__signum_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = GB_signum (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_signum (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = GB_signum (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIGNUM || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = signum (Ax [p]) for all anz entries, in parallel.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_unop_apply__signum_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = GB_signum (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions with no entry present
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = GB_signum (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = signum (A'): the transpose kernel body is shared via GB_unop_transpose.c,
// which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__signum_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,     // per-workspace transpose buffers
    const int64_t *restrict A_slice,   // how A is sliced across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Example_scan.2.c | /*
* @@name: scan.2.c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#include <stdio.h>
#define N 100
// Demonstrates an EXCLUSIVE scan: b[k] receives the prefix sum of
// a[0..k-1], i.e. the value of x *before* a[k] is added.
int main(void)
{
   int a[N], b[N];
   int x = 0;

   // initialization
   for (int k = 0; k < N; k++)
      a[k] = k + 1;

   // a[k] is not included in the computation of producing results in b[k]
   // The scan directive splits each iteration into a "read" phase (before)
   // and an "update" phase (after); exclusive(x) makes b[k] see the
   // pre-update value of the reduction variable x.
   #pragma omp parallel for simd reduction(inscan,+: x)
   for (int k = 0; k < N; k++) {
      b[k] = x;
      #pragma omp scan exclusive(x)
      x += a[k];
   }

   printf("x = %d, b[0:3] = %d %d %d\n", x, b[0], b[1], b[2]);
   // 5050, 0 1 3
   return 0;
}
|
depth-metrics.h | // License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
//
// Plane Fit implementation follows http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points algorithm
#pragma once
#include <vector>
#include <mutex>
#include <array>
#include <imgui.h>
#include <librealsense2-framos/rsutil.h>
#include <librealsense2-framos/rs.hpp>
#include "rendering.h"
namespace rs2
{
namespace depth_quality
{
// Aggregated results of analyzing a single depth frame against a fitted plane
// (filled in by analyze_depth_image).
struct snapshot_metrics
{
    int width;                    // frame width in pixels
    int height;                   // frame height in pixels
    rs2::region_of_interest roi;  // region the metrics were computed over

    float distance;               // camera-to-plane distance in mm (= -p.d * 1000)
    float angle;                  // angle between plane normal and optical axis, degrees
    float angle_x;                // x/y components of the camera axis projected onto the
    float angle_y;                // plane — presumably the tilt direction; TODO confirm

    plane p;                      // fitted plane coefficients (a,b,c,d)
    std::array<float3, 4> plane_corners;  // plane intersections at the 4 ROI corners
};
// One named measurement sample produced by a metric callback.
struct single_metric_data
{
    single_metric_data(std::string metric_name, float metric_val)
        : val(metric_val), name(metric_name) {}

    float val;        // measured value
    std::string name; // metric identifier
};
// Signature of the per-frame metrics callback invoked by analyze_depth_image:
// receives the ROI point cloud, the fitted plane, and the computed/ancillary
// values, and may append results to `samples` (when `record` is set).
using callback_type = std::function<void(
    const std::vector<rs2::float3>& points,
    const plane p,
    const rs2::region_of_interest roi,
    const float baseline_mm,
    const float focal_length_pixels,
    const int ground_thruth_mm,
    const bool plane_fit,
    const float plane_fit_to_ground_truth_mm,
    const float distance_mm,
    bool record,
    std::vector<single_metric_data>& samples)>;
// Build the plane a*x + b*y + c*z + d = 0 from a point on the plane and its
// normal vector: (a,b,c) = normal, d = -normal . point.
inline plane plane_from_point_and_normal(const rs2::float3& point, const rs2::float3& normal)
{
    const float d = -(normal.x * point.x + normal.y * point.y + normal.z * point.z);
    return{ normal.x, normal.y, normal.z, d };
}
//Based on: http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points
inline plane plane_from_points(const std::vector<rs2::float3> points)
{
if (points.size() < 3) throw std::runtime_error("Not enough points to calculate plane");
rs2::float3 sum = { 0,0,0 };
for (auto point : points) sum = sum + point;
rs2::float3 centroid = sum / float(points.size());
double xx = 0, xy = 0, xz = 0, yy = 0, yz = 0, zz = 0;
for (auto point : points) {
rs2::float3 temp = point - centroid;
xx += temp.x * temp.x;
xy += temp.x * temp.y;
xz += temp.x * temp.z;
yy += temp.y * temp.y;
yz += temp.y * temp.z;
zz += temp.z * temp.z;
}
double det_x = yy*zz - yz*yz;
double det_y = xx*zz - xz*xz;
double det_z = xx*yy - xy*xy;
double det_max = std::max({ det_x, det_y, det_z });
if (det_max <= 0) return{ 0, 0, 0, 0 };
rs2::float3 dir{};
if (det_max == det_x)
{
float a = static_cast<float>((xz*yz - xy*zz) / det_x);
float b = static_cast<float>((xy*yz - xz*yy) / det_x);
dir = { 1, a, b };
}
else if (det_max == det_y)
{
float a = static_cast<float>((yz*xz - xy*zz) / det_y);
float b = static_cast<float>((xy*xz - yz*xx) / det_y);
dir = { a, 1, b };
}
else
{
float a = static_cast<float>((yz*xy - xz*yy) / det_z);
float b = static_cast<float>((xz*xy - yz*xx) / det_z);
dir = { a, b, 1 };
}
return plane_from_point_and_normal(centroid, dir.normalize());
}
// Deproject pixel (x,y) at the given depth into a 3D point (written to
// `output`) and return its signed distance from plane p.
inline double evaluate_pixel(const plane& p, const rs2_intrinsics* intrin, float x, float y, float distance, float3& output)
{
    float pix[2] = { x, y };
    rs2_deproject_pixel_to_point(&output.x, intrin, pix, distance);
    return evaluate_plane(p, output);
}
// Find the 3D point where the viewing ray through pixel (x,y) crosses plane p,
// by bisecting the depth interval [min, max] on the signed plane distance.
// Returns {0,0,0} when the ray does not cross the plane within the interval.
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y, float min, float max)
{
    float3 point;
    auto f = evaluate_pixel(p, intrin, x, y, max, point);
    // Interval narrow enough: accept the endpoint just evaluated.
    if (fabs(max - min) < 1e-3) return point;
    auto n = evaluate_pixel(p, intrin, x, y, min, point);
    // Same sign at both ends: no crossing within [min, max].
    if (f*n > 0) return{ 0, 0, 0 };

    // Recurse into whichever half-interval contains the sign change.
    auto avg = (max + min) / 2;
    auto mid = evaluate_pixel(p, intrin, x, y, avg, point);
    if (mid*n < 0) return approximate_intersection(p, intrin, x, y, min, avg);
    return approximate_intersection(p, intrin, x, y, avg, max);
}
// Convenience overload: search the full working depth range [0, 1000].
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y)
{
    const float depth_lo = 0.f, depth_hi = 1000.f;
    return approximate_intersection(p, intrin, x, y, depth_lo, depth_hi);
}
// Fit a plane to the valid depth pixels inside `roi` and derive the per-frame
// quality metrics (camera-to-plane distance, tilt angle, ROI corner points).
// The `callback` receives the raw ROI point cloud plus the computed values so
// the caller can record additional metrics into `samples`.
// Returns early (metrics zeroed except width/height/roi) when the ROI has
// fewer than 3 valid pixels or the points do not span a plane.
inline snapshot_metrics analyze_depth_image(
    const rs2::video_frame& frame,
    float units, float baseline_mm,
    const rs2_intrinsics * intrin,
    rs2::region_of_interest roi,
    const int ground_truth_mm,
    bool plane_fit_present,
    std::vector<single_metric_data>& samples,
    bool record,
    callback_type callback)
{
    auto pixels = (const uint16_t*)frame.get_data();
    const auto w = frame.get_width();
    const auto h = frame.get_height();  // note: h is only stored in the result

    snapshot_metrics result{ w, h, roi, {} };

    std::mutex m;

    std::vector<rs2::float3> roi_pixels;

    //#pragma omp parallel for - TODO optimization envisaged
    for (int y = roi.min_y; y < roi.max_y; ++y)
        for (int x = roi.min_x; x < roi.max_x; ++x)
        {
            auto depth_raw = pixels[y*w + x];

            if (depth_raw)  // zero raw depth = invalid pixel, skip
            {
                // units is float
                // depth scale: raw value * units gives the metric distance
                float pixel[2] = { float(x), float(y) };
                float point[3];
                auto distance = depth_raw * units;

                rs2_deproject_pixel_to_point(point, intrin, pixel, distance);

                // lock guards the shared vector (for the intended parallel loop)
                std::lock_guard<std::mutex> lock(m);
                roi_pixels.push_back({ point[0], point[1], point[2] });
            }
        }

    if (roi_pixels.size() < 3) { // Not enough pixels in RoI to fit a plane
        return result;
    }

    plane p = plane_from_points(roi_pixels);

    if (p == plane{ 0, 0, 0, 0 }) { // The points in RoI don't span a valid plane
        return result;
    }

    // Calculate intersection of the plane fit with a ray along the center of ROI
    // that by design coincides with the center of the frame
    float3 plane_fit_pivot = approximate_intersection(p, intrin, intrin->width / 2.f, intrin->height / 2.f);
    float plane_fit_to_gt_offset_mm = (ground_truth_mm > 0.f) ? (plane_fit_pivot.z * 1000 - ground_truth_mm) : 0;

    result.p = p;
    result.plane_corners[0] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.min_y));
    result.plane_corners[1] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.min_y));
    result.plane_corners[2] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.max_y));
    result.plane_corners[3] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.max_y));

    // Distance of origin (the camera) from the plane is encoded in parameter D of the plane
    // The parameter represents the euclidian distance (along plane normal) from camera to the plane
    result.distance = static_cast<float>(-p.d * 1000);
    // Angle can be calculated from param C
    result.angle = static_cast<float>(std::acos(std::abs(p.c)) / M_PI * 180.);

    callback(roi_pixels, p, roi, baseline_mm, intrin->fx, ground_truth_mm, plane_fit_present,
             plane_fit_to_gt_offset_mm, result.distance, record, samples);

    // Calculate normal
    auto n = float3{ p.a, p.b, p.c };
    auto cam = float3{ 0.f, 0.f, -1.f };
    auto dot = n * cam;
    // u = projection of the camera axis onto the plane — presumably the
    // tilt direction of the plane; TODO confirm intended semantics
    auto u = cam - n * dot;

    result.angle_x = u.x;
    result.angle_y = u.y;

    return result;
}
}
}
|
ams.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
/*--------------------------------------------------------------------------
 * hypre_ParCSRRelaxCopyOffdValues
 *
 * Gather the off-processor entries of u that are referenced by the
 * off-diagonal block of A into u_offd_data (length = num cols of A_offd).
 * Builds A's communication package on demand.  No-op on a single process.
 * (Extracted helper: this code was duplicated verbatim in the block-GS/SSOR
 * and Kaczmarz branches of hypre_ParCSRRelax.)
 *--------------------------------------------------------------------------*/
static void hypre_ParCSRRelaxCopyOffdValues(hypre_ParCSRMatrix *A,
                                            HYPRE_Real *u_data,
                                            HYPRE_Real *u_offd_data)
{
   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

   if (num_procs > 1)
   {
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      HYPRE_Int num_sends;
      HYPRE_Real *u_buf_data;
      hypre_ParCSRCommHandle *comm_handle;
      HYPRE_Int i, j, index = 0, start;

      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(A);
         comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      }

      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      u_buf_data = hypre_TAlloc(HYPRE_Real,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

      /* Pack the locally-owned values that other processes need. */
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
            u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
      hypre_ParCSRCommHandleDestroy(comm_handle); /* blocks until exchange completes */
      hypre_TFree(u_buf_data);
   }
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRRelax
 *
 * Relaxation on the ParCSR matrix A with right-hand side f and
 * initial guess u. Possible values for relax_type are:
 *
 * 1 = l1-scaled (or weighted) Jacobi
 * 2 = l1-scaled block Gauss-Seidel/SSOR
 * 3 = Kaczmarz
 * 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
 * x = BoomerAMG relaxation with relax_type = |x|
 * (16 = Cheby)
 *
 * The default value of relax_type is 2.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax(/* matrix to relax with */
                            hypre_ParCSRMatrix *A,
                            /* right-hand side */
                            hypre_ParVector *f,
                            /* relaxation type */
                            HYPRE_Int relax_type,
                            /* number of sweeps */
                            HYPRE_Int relax_times,
                            /* l1 norms of the rows of A */
                            HYPRE_Real *l1_norms,
                            /* damping coefficient (usually <= 1) */
                            HYPRE_Real relax_weight,
                            /* SOR parameter (usually in (0,2) */
                            HYPRE_Real omega,
                            /* for cheby smoothers */
                            HYPRE_Real max_eig_est,
                            HYPRE_Real min_eig_est,
                            HYPRE_Int cheby_order,
                            HYPRE_Real cheby_fraction,
                            /* initial/updated approximation */
                            hypre_ParVector *u,
                            /* temporary vector */
                            hypre_ParVector *v,
                            /* temporary vector */
                            hypre_ParVector *z)
{
   HYPRE_Int sweep;

   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));

   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         HYPRE_Int i, num_rows = hypre_ParCSRMatrixNumRows(A);

         /* v = w (f - A u) */
         hypre_ParVectorCopy(f,v);
         hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, v);

         /* u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 */
         for (i = 0; i < num_rows; i++)
            u_data[i] += v_data[i] / l1_norms[i];
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real,num_cols_offd);
         HYPRE_Real res;

         /* Copy off-diagonal values of u to the current processor */
         hypre_ParCSRRelaxCopyOffdValues(A, u_data, u_offd_data);

         if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
         }
         else if (relax_weight == 1.0) /* SSOR */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
         }
         else /* scaled SSOR */
         {
            HYPRE_Real dif;
            HYPRE_Real c1 = omega * relax_weight;
            HYPRE_Real c2 = omega * (1.0 - relax_weight);

            /* Forward local pass (save initial guess in v_data) */
            for (i = 0; i < num_rows; i++)
            {
               dif = 0.0;
               v_data[i] = u_data[i];
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  /* strictly-lower part: accumulate the GS correction term */
                  if (A_diag_J[j] < i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               dif = 0.0;
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  /* strictly-upper part on the backward sweep */
                  if (A_diag_J[j] > i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
         }
         hypre_TFree(u_offd_data);
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real,num_cols_offd);
         HYPRE_Real res;

         /* Copy off-diagonal values of u to the current processor */
         hypre_ParCSRRelaxCopyOffdValues(A, u_data, u_offd_data);

         /* Forward local pass: project u onto each row's hyperplane,
            distributing the scaled residual along the row's entries. */
         for (i = 0; i < num_rows; i++)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         /* Backward local pass */
         for (i = num_rows-1; i > -1; i--)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }
         hypre_TFree(u_offd_data);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            hypre_ParCSRRelax_Cheby(A,
                                    f,
                                    max_eig_est,
                                    min_eig_est,
                                    cheby_fraction, cheby_order, 1,
                                    0, u, v, z);
         }
         else
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
                                 omega, l1_norms, u, v, z);
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInRangeOf
*
* Return a vector that belongs to the range of a given matrix.
*--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   /* A range-space vector has as many rows as A and shares A's row
      distribution. */
   hypre_ParVector *vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                                hypre_ParCSRMatrixGlobalNumRows(A),
                                                hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(vec);

   /* The vector owns its data, but the partitioning stays owned by A. */
   hypre_ParVectorOwnsData(vec) = 1;
   hypre_ParVectorOwnsPartitioning(vec) = 0;

   return vec;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorInDomainOf
*
* Return a vector that belongs to the domain of a given matrix.
*--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   /* A domain-space vector has as many entries as A has columns and shares
      A's column distribution. */
   hypre_ParVector *vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                                hypre_ParCSRMatrixGlobalNumCols(A),
                                                hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(vec);

   /* The vector owns its data, but the partitioning stays owned by A. */
   hypre_ParVectorOwnsData(vec) = 1;
   hypre_ParVectorOwnsPartitioning(vec) = 0;

   return vec;
}
/*--------------------------------------------------------------------------
* hypre_ParVectorBlockSplit
*
* Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
* block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
                                    hypre_ParVector *x_[3],
                                    HYPRE_Int dim)
{
   HYPRE_Int i, d, local_size;
   HYPRE_Real *x_data, *x_data_[3];

   local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));

   /* For each component d, strided-copy component d of every block entry
      of the interleaved vector x into the sub-vector x_[d]. */
   for (d = 0; d < dim; d++)
   {
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
      for (i = 0; i < local_size; i++)
         x_data_[d][i] = x_data[dim*i + d];
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockGather
 *
 * Compose a parallel block vector x from dim given sub-vectors
 * x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]],
 * i.e. the components are interleaved node-by-node (inverse of BlockSplit).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
                                     hypre_ParVector *x_[3],
                                     HYPRE_Int dim)
{
   HYPRE_Int k, d;
   HYPRE_Real *x_data;
   HYPRE_Int local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   /* scatter each component vector back into its interleaved slot in x */
   for (d = 0; d < dim; d++)
   {
      HYPRE_Real *comp_data = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));
      for (k = 0; k < local_size; k++)
         x_data[dim*k+d] = comp_data[k];
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBlockSolve
 *
 * Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
 * Here B is a given BoomerAMG solver for A, while x and b are "block"
 * parallel vectors (dim interleaved scalar components per node).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *b,
                                    hypre_ParVector *x)
{
   HYPRE_Int k, dim = 1;
   hypre_ParVector *b_[3];
   hypre_ParVector *x_[3];

   /* number of interleaved scalar components in x */
   dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);
   if (dim == 1)
   {
      /* scalar problem: solve directly, no splitting required */
      hypre_BoomerAMGSolve(B, A, b, x);
      return hypre_error_flag;
   }

   /* split into components, solve each with B, then gather back into x */
   for (k = 0; k < dim; k++)
   {
      b_[k] = hypre_ParVectorInRangeOf(A);
      x_[k] = hypre_ParVectorInRangeOf(A);
   }
   hypre_ParVectorBlockSplit(b, b_, dim);
   hypre_ParVectorBlockSplit(x, x_, dim);

   for (k = 0; k < dim; k++)
      hypre_BoomerAMGSolve(B, A, b_[k], x_[k]);

   hypre_ParVectorBlockGather(x, x_, dim);

   for (k = 0; k < dim; k++)
   {
      hypre_ParVectorDestroy(b_[k]);
      hypre_ParVectorDestroy(x_[k]);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFixZeroRows
 *
 * For every (numerically) zero row in the matrix: set the diagonal
 * element to 1 and all other entries in the row to 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j;
   HYPRE_Real row_norm;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   /* a row is considered zero if its l1 norm does not exceed eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (i = 0; i < num_rows; i++)
   {
      /* accumulate the l1 norm of row i over diag and offd parts */
      row_norm = 0.0;
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
         row_norm += fabs(A_diag_data[j]);
      if (num_cols_offd)
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
            row_norm += fabs(A_offd_data[j]);

      if (row_norm > eps)
         continue;

      /* zero row: identity on the diagonal, zeros elsewhere */
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
         A_diag_data[j] = (A_diag_J[j] == i) ? 1.0 : 0.0;
      if (num_cols_offd)
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
            A_offd_data[j] = 0.0;
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 * option 5 = Store the diagonal of A (for Jacobi via matvec, relax type 7)
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
 *
 * On success, *l1_norm_ptr points to a newly allocated array of length
 * NumRows(A); the caller owns it and must hypre_TFree it.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                                     HYPRE_Int option,
                                     HYPRE_Int *cf_marker,
                                     HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows);
   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* Collect the CF marker values for the off-processor columns, so the
      CF-filtered sums below can test offd entries too. */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;
      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }

   if (option == 1)
   {
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               if (cf_diag == cf_marker[A_diag_J[j]])
                  l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 2)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row (assumed to be stored first
            in the diag part of the row) */
         l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 3)
   {
      /* l2 norm squared of the full row (CF filtering not applied here) */
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            l1_norm[i] += A_diag_data[j] * A_diag_data[j];
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               l1_norm[i] += A_offd_data[j] * A_offd_data[j];
      }
   }
   else if (option == 4)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row */
         diag = l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the scaled l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the scaled CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         /* Truncate according to Remark 6.2 */
         if (l1_norm[i] <= 4.0/3.0*diag)
            l1_norm[i] = diag;
      }
   }
   else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
   {
      for (i = 0; i < num_rows; i++)
      {
         diag = A_diag_data[A_diag_I[i]];
         if (diag != 0.0) l1_norm[i] = diag;
         else l1_norm[i] = 1.0;   /* guard against division by zero later */
      }
      /* BUGFIX: the early return used to skip this free, leaking
         cf_marker_offd whenever cf_marker != NULL. */
      hypre_TFree(cf_marker_offd);
      *l1_norm_ptr = l1_norm;
      return hypre_error_flag;
   }

   /* Handle negative definite matrices: keep the sign of the diagonal */
   for (i = 0; i < num_rows; i++)
      if (A_diag_data[A_diag_I[i]] < 0)
         l1_norm[i] = -l1_norm[i];

   /* A zero norm would make the smoother divide by zero; flag it */
   for (i = 0; i < num_rows; i++)
      /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
      if (fabs(l1_norm[i]) == 0.0)
      {
         hypre_error_in_arg(1);
         break;
      }
   //for (i = 0; i < num_rows; i++) l1_norm[i]=1.0/l1_norm[i];

   hypre_TFree(cf_marker_offd);
   *l1_norm_ptr = l1_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDiagRows
 *
 * For every row containing only a diagonal element: set it to d.
 * A row qualifies when its diag part holds exactly one entry, that entry
 * is on the diagonal, and its offd part is empty.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   HYPRE_Int i;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   for (i = 0; i < num_rows; i++)
   {
      HYPRE_Int first = A_diag_I[i];
      HYPRE_Int diag_only = (A_diag_I[i+1] == first+1) && (A_diag_J[first] == i);
      HYPRE_Int offd_empty = !num_cols_offd || (A_offd_I[i+1] == A_offd_I[i]);
      if (diag_only && offd_empty)
         A_diag_data[first] = d;
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSCreate
 *
 * Allocate the AMS solver structure and fill in the default parameters.
 * All remaining fields are cleared here and set later via the Set
 * functions and/or hypre_AMSSetup().
 *--------------------------------------------------------------------------*/
void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data = hypre_CTAlloc(hypre_AMSData, 1);

   /* --- Solver controls --- */
   ams_data->dim = 3;                   /* 3D problem */
   ams_data->maxit = 20;                /* perform at most 20 iterations */
   ams_data->tol = 1e-6;                /* convergence tolerance */
   ams_data->print_level = 1;           /* print residual norm at each step */
   ams_data->cycle_type = 1;            /* a 3-level multiplicative solver */

   /* --- Smoother parameters for A --- */
   ams_data->A_relax_type = 2;          /* offd-l1-scaled GS */
   ams_data->A_relax_times = 1;         /* one relaxation sweep */
   ams_data->A_relax_weight = 1.0;      /* damping parameter */
   ams_data->A_omega = 1.0;             /* SSOR coefficient */
   ams_data->A_cheby_order = 2;         /* Cheby: order (1-4 are valid) */
   ams_data->A_cheby_fraction = .3;     /* Cheby: fraction of spectrum to smooth */

   /* --- AMG options for the B_G (node space) subsolver --- */
   ams_data->B_G_coarsen_type = 10;     /* HMIS coarsening */
   ams_data->B_G_agg_levels = 1;        /* levels of aggressive coarsening */
   ams_data->B_G_relax_type = 3;        /* hybrid G-S/Jacobi */
   ams_data->B_G_theta = 0.25;          /* strength threshold */
   ams_data->B_G_interp_type = 0;       /* interpolation type */
   ams_data->B_G_Pmax = 0;              /* max nonzero elements in interp. rows */

   /* --- AMG options for the B_Pi (vector nodal space) subsolver --- */
   ams_data->B_Pi_coarsen_type = 10;    /* HMIS coarsening */
   ams_data->B_Pi_agg_levels = 1;       /* levels of aggressive coarsening */
   ams_data->B_Pi_relax_type = 3;       /* hybrid G-S/Jacobi */
   ams_data->B_Pi_theta = 0.25;         /* strength threshold */
   ams_data->B_Pi_interp_type = 0;      /* interpolation type */
   ams_data->B_Pi_Pmax = 0;             /* max nonzero elements in interp. rows */

   ams_data->beta_is_zero = 0;          /* the problem has a mass term */

   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data->B_G_coarse_relax_type = 8;
   ams_data->B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions
      (hypre_CTAlloc already zeroed them; the explicit stores below
      document the intent). */
   ams_data->A = NULL;
   ams_data->G = NULL;
   ams_data->A_G = NULL;
   ams_data->B_G = 0;
   ams_data->Pi = NULL;
   ams_data->A_Pi = NULL;
   ams_data->B_Pi = 0;
   ams_data->x = NULL;
   ams_data->y = NULL;
   ams_data->z = NULL;
   ams_data->Gx = NULL;
   ams_data->Gy = NULL;
   ams_data->Gz = NULL;
   ams_data->r0 = NULL;
   ams_data->g0 = NULL;
   ams_data->r1 = NULL;
   ams_data->g1 = NULL;
   ams_data->r2 = NULL;
   ams_data->g2 = NULL;
   ams_data->Pix = NULL;
   ams_data->Piy = NULL;
   ams_data->Piz = NULL;
   ams_data->A_Pix = NULL;
   ams_data->A_Piy = NULL;
   ams_data->A_Piz = NULL;
   ams_data->B_Pix = 0;
   ams_data->B_Piy = 0;
   ams_data->B_Piz = 0;
   ams_data->interior_nodes = NULL;
   ams_data->G0 = NULL;
   ams_data->A_G0 = NULL;
   ams_data->B_G0 = 0;
   ams_data->projection_frequency = 5;
   ams_data->A_l1_norms = NULL;
   ams_data->A_max_eig_est = 0;
   ams_data->A_min_eig_est = 0;
   ams_data->owns_Pi = 1;
   ams_data->owns_A_G = 0;
   ams_data->owns_A_Pi = 0;

   return (void *) ams_data;
}
/*--------------------------------------------------------------------------
 * hypre_AMSDestroy
 *
 * Deallocate the AMS solver structure. Note that the input data (given
 * through the Set functions) is not destroyed.
 *
 * Ownership summary (as visible here): objects guarded by owns_* flags
 * or built internally (A_G, B_G, Pi*, A_Pi*, B_Pi*, r*/g* work vectors,
 * G0, A_G0, B_G0, A_l1_norms) are freed; user-supplied G, x, y, z,
 * Gx, Gy, Gz are left alone.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Guard: a NULL handle is a caller error, not a crash. */
   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* A_G is freed only when this solver built it (owns_A_G set). */
   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
   /* B_G exists only for problems with a mass term (beta != 0). */
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);
   /* Pi and its components are freed only if built internally (owns_Pi). */
   if (ams_data -> owns_Pi && ams_data -> Pi)
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
   if (ams_data -> B_Pi)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);
   if (ams_data -> owns_Pi && ams_data -> Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   if (ams_data -> A_Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   if (ams_data -> B_Pix)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   if (ams_data -> owns_Pi && ams_data -> Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   if (ams_data -> A_Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   if (ams_data -> B_Piy)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   if (ams_data -> owns_Pi && ams_data -> Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   if (ams_data -> A_Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   if (ams_data -> B_Piz)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);
   /* Internal residual/correction work vectors. */
   if (ams_data -> r0)
      hypre_ParVectorDestroy(ams_data -> r0);
   if (ams_data -> g0)
      hypre_ParVectorDestroy(ams_data -> g0);
   if (ams_data -> r1)
      hypre_ParVectorDestroy(ams_data -> r1);
   if (ams_data -> g1)
      hypre_ParVectorDestroy(ams_data -> g1);
   if (ams_data -> r2)
      hypre_ParVectorDestroy(ams_data -> r2);
   if (ams_data -> g2)
      hypre_ParVectorDestroy(ams_data -> g2);
   /* NOTE(review): destroying 'A' under the G0 flag looks suspicious, but
      presumably when interior_nodes/G0 are used the setup replaces A with an
      internally-built (projected) matrix that the solver then owns — confirm
      against hypre_AMSSetup before changing this. */
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   if (ams_data -> A_G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   if (ams_data -> B_G0)
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);
   if (ams_data -> A_l1_norms)
      hypre_TFree(ams_data -> A_l1_norms);
   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */
   if (ams_data)
      hypre_TFree(ams_data);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetDimension
 *
 * Set problem dimension (2 or 3). By default we assume dim = 3.
 * An unsupported value is flagged as an argument error and ignored.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDimension(void *solver,
                                HYPRE_Int dim)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* BUGFIX: previously the invalid dim was stored even after the error was
      flagged, clobbering the valid default; now we return without storing. */
   if (dim != 2 && dim != 3)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   ams_data -> dim = dim;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetDiscreteGradient
 *
 * Attach the discrete gradient matrix G to the solver (not copied).
 * This function should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   ((hypre_AMSData *) solver) -> G = G;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetCoordinateVectors
 *
 * Attach the x, y and z coordinates of the vertices in the mesh
 * (not copied).
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> x = x;
   data -> y = y;
   data -> z = z;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetEdgeConstantVectors
 *
 * Attach the vectors Gx, Gy and Gz which give the representations of
 * the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
 * edge element basis (not copied).
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> Gx = Gx;
   data -> Gy = Gy;
   data -> Gz = Gz;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetInterpolations
 *
 * Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
 *
 * This function is generally intended to be used only for high-order Nedelec
 * discretizations (in the lowest order case, Pi is constructed internally in
 * AMS from the discrete gradient matrix and the coordinates of the vertices),
 * though it can also be used in the lowest-order case or for other types of
 * discretizations (e.g. ones based on the second family of Nedelec elements).
 *
 * By definition, Pi is the matrix representation of the linear operator that
 * interpolates (high-order) vector nodal finite elements into the (high-order)
 * Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
 * and similarly for Piy and Piz. Note that all these operators depend on the
 * choice of the basis and degrees of freedom in the high-order spaces.
 *
 * The column numbering of Pi should be node-based, i.e. the x/y/z components
 * of the first node (vertex or high-order dof) should be listed first,
 * followed by the x/y/z components of the second node and so on (see the
 * documentation of HYPRE_BoomerAMGSetDofFunc).
 *
 * If used, this function should be called before hypre_AMSSetup() and there
 * is no need to provide the vertex coordinates. Furthermore, only one of the
 * sets {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to
 * provide both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those
 * with cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
 * monolithic Pi (cycle_type < 10) require that Pi is not NULL.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> Pi  = Pi;
   data -> Pix = Pix;
   data -> Piy = Piy;
   data -> Piz = Piz;
   /* user-supplied matrices: the solver must not destroy them */
   data -> owns_Pi = 0;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * alpha (the curl-curl term coefficient in the Maxwell problem).
 *
 * If this function is called, the coarse space solver on the range
 * of Pi^T is a block-diagonal version of A_Pi. If this function is not
 * called, the coarse space solver on the range of Pi^T is constructed
 * as Pi^T A Pi in hypre_AMSSetup().
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> A_Pi = A_Pi;

   /* Penalize the eliminated degrees of freedom */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * beta (the mass term coefficient in the Maxwell problem).
 *
 * This function call is optional - if not given, the Poisson matrix will
 * be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
 * that beta is 0 and use two-level (instead of three-level) methods.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> A_G = A_G;
   if (A_G)
   {
      /* Penalize the eliminated degrees of freedom */
      hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);
      /* Make sure that the first entry in each row is the diagonal one. */
      /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
   }
   else
   {
      /* NULL matrix means beta == 0: switch to two-level cycles */
      data -> beta_is_zero = 1;
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetInteriorNodes
 *
 * Set the list of nodes which are interior to the zero-conductivity region.
 * A node is interior if interior_nodes[i] == 1.0.
 *
 * Should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInteriorNodes(void *solver,
                                    hypre_ParVector *interior_nodes)
{
   ((hypre_AMSData *) solver) -> interior_nodes = interior_nodes;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetProjectionFrequency
 *
 * How often to project the r.h.s. onto the compatible sub-space Ker(G0^T),
 * when iterating with the solver.
 *
 * The default value is every 5th iteration.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver,
                                          HYPRE_Int projection_frequency)
{
   ((hypre_AMSData *) solver) -> projection_frequency = projection_frequency;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetMaxIter
 *
 * Set the maximum number of iterations in the three-level method.
 * The default value is 20. To use the AMS solver as a preconditioner,
 * set maxit to 1, tol to 0.0 and print_level to 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetMaxIter(void *solver,
                              HYPRE_Int maxit)
{
   ((hypre_AMSData *) solver) -> maxit = maxit;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetTol
 *
 * Set the convergence tolerance (if the method is used as a solver).
 * The default value is 1e-6.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetTol(void *solver,
                          HYPRE_Real tol)
{
   ((hypre_AMSData *) solver) -> tol = tol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetCycleType
 *
 * Choose which three-level solver to use. Possible values are:
 *
 *   1 = 3-level multipl. solver (01210)    <-- small solution time
 *   2 = 3-level additive solver (0+1+2)
 *   3 = 3-level multipl. solver (02120)
 *   4 = 3-level additive solver (010+2)
 *   5 = 3-level multipl. solver (0102010)  <-- small solution time
 *   6 = 3-level additive solver (1+020)
 *   7 = 3-level multipl. solver (0201020)  <-- small number of iterations
 *   8 = 3-level additive solver (0(1+2)0)  <-- small solution time
 *   9 = 3-level multipl. solver (01210) with discrete divergence
 *  11 = 5-level multipl. solver (013454310) <-- small solution time, memory
 *  12 = 5-level additive solver (0+1+3+4+5)
 *  13 = 5-level multipl. solver (034515430) <-- small solution time, memory
 *  14 = 5-level additive solver (01(3+4+5)10)
 *  20 = 2-level multipl. solver (0[12]0)
 *
 *   0 = a Hiptmair-like smoother (010)
 *
 * The default value is 1.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCycleType(void *solver,
                                HYPRE_Int cycle_type)
{
   ((hypre_AMSData *) solver) -> cycle_type = cycle_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetPrintLevel
 *
 * Control how much information is printed during the solution iterations.
 * The default value is 1 (print residual norm at each step).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetPrintLevel(void *solver,
                                 HYPRE_Int print_level)
{
   ((hypre_AMSData *) solver) -> print_level = print_level;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetSmoothingOptions
 *
 * Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
                                       HYPRE_Int A_relax_type,
                                       HYPRE_Int A_relax_times,
                                       HYPRE_Real A_relax_weight,
                                       HYPRE_Real A_omega)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> A_relax_type   = A_relax_type;
   data -> A_relax_times  = A_relax_times;
   data -> A_relax_weight = A_relax_weight;
   data -> A_omega        = A_omega;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetChebySmoothingOptions
* AB: note: this could be added to the above,
* but I didn't want to change parameter list)
* Set parameters for chebyshev smoother for A. Default values: 2,.3.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
HYPRE_Int A_cheby_order,
HYPRE_Int A_cheby_fraction)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
ams_data -> A_cheby_order = A_cheby_order;
ams_data -> A_cheby_fraction = A_cheby_fraction;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGOptions
 *
 * Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
                                      HYPRE_Int B_Pi_coarsen_type,
                                      HYPRE_Int B_Pi_agg_levels,
                                      HYPRE_Int B_Pi_relax_type,
                                      HYPRE_Real B_Pi_theta,
                                      HYPRE_Int B_Pi_interp_type,
                                      HYPRE_Int B_Pi_Pmax)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> B_Pi_coarsen_type = B_Pi_coarsen_type;
   data -> B_Pi_agg_levels   = B_Pi_agg_levels;
   data -> B_Pi_relax_type   = B_Pi_relax_type;
   data -> B_Pi_theta        = B_Pi_theta;
   data -> B_Pi_interp_type  = B_Pi_interp_type;
   data -> B_Pi_Pmax         = B_Pi_Pmax;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
                                              HYPRE_Int B_Pi_coarse_relax_type)
{
   ((hypre_AMSData *) solver) -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGOptions
 *
 * Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
                                     HYPRE_Int B_G_coarsen_type,
                                     HYPRE_Int B_G_agg_levels,
                                     HYPRE_Int B_G_relax_type,
                                     HYPRE_Real B_G_theta,
                                     HYPRE_Int B_G_interp_type,
                                     HYPRE_Int B_G_Pmax)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data -> B_G_coarsen_type = B_G_coarsen_type;
   data -> B_G_agg_levels   = B_G_agg_levels;
   data -> B_G_relax_type   = B_G_relax_type;
   data -> B_G_theta        = B_G_theta;
   data -> B_G_interp_type  = B_G_interp_type;
   data -> B_G_Pmax         = B_G_Pmax;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_G. Default value: 8.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
                                             HYPRE_Int B_G_coarse_relax_type)
{
   ((hypre_AMSData *) solver) -> B_G_coarse_relax_type = B_G_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputePi
*
* Construct the Pi interpolation matrix, which maps the space of vector
* linear finite elements to the space of edge finite elements.
*
* The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
* where each block has the same sparsity structure as G, and the entries
* can be computed from the vectors Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pi_ptr)
{
/* Note: A is not referenced in this routine; it is kept in the signature
for symmetry with the other Pi-construction functions below. */
hypre_ParCSRMatrix *Pi;
/* Compute Pi = [Pi_x, Pi_y, Pi_z] */
{
HYPRE_Int i, j, d;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
/* Pi has the same rows as G, but dim copies of G's columns: each
G-column expands into dim consecutive Pi-columns (x,y[,z] blocks
interleaved per column). */
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_Int global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_Int col_starts_size, *col_starts;
HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
HYPRE_Int *col_starts_G = hypre_ParCSRMatrixColStarts(G);
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* Assumed-partition mode: col_starts holds only the local range. */
col_starts_size = 2;
#else
/* Global-partition mode: one entry per process, plus one. */
HYPRE_Int num_procs;
hypre_MPI_Comm_size(comm, &num_procs);
col_starts_size = num_procs+1;
#endif
/* Scale G's column partition by dim to get Pi's column partition.
This array is handed to Pi, which takes ownership of it below. */
col_starts = hypre_TAlloc(HYPRE_Int,col_starts_size);
for (i = 0; i < col_starts_size; i++)
col_starts[i] = dim * col_starts_G[i];
Pi = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Pi) = 1;
/* row_starts is borrowed from G; col_starts was allocated here. */
hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;
hypre_ParCSRMatrixInitialize(Pi);
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
/* Gz is dereferenced only in 3D; it may be NULL when dim == 2. */
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);
/* Each G nonzero expands to dim consecutive Pi nonzeros, so row
pointers and column indices are simply scaled/offset by dim. */
for (i = 0; i < G_diag_nrows+1; i++)
Pi_diag_I[i] = dim * G_diag_I[i];
for (i = 0; i < G_diag_nnz; i++)
for (d = 0; d < dim; d++)
Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;
/* Data is written in interleaved x,y[,z] order per nonzero; the
post-incremented pointer tracks the dim*nnz layout set up above.
Gx_data[i] etc. are the row-i (edge-based) entries of Gx,Gy,Gz;
the 0.5*|G_ij| factor distributes them to the edge's endpoints
(see the function header comment). */
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
/* Fill-in the off-diagonal part */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);
HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_Int *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);
/* Skip the row-pointer copy when there are no offd columns
(the offd I array may not be meaningful in that case). */
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
Pi_offd_I[i] = dim * G_offd_I[i];
for (i = 0; i < G_offd_nnz; i++)
for (d = 0; d < dim; d++)
Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
/* Expand the global off-diagonal column map the same way as J. */
for (i = 0; i < G_offd_ncols; i++)
for (d = 0; d < dim; d++)
Pi_cmap[dim*i+d] = dim*G_cmap[i]+d;
}
}
*Pi_ptr = Pi;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputePixyz
*
* Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
* which maps the space of vector linear finite elements to the space of
* edge finite elements.
*
* The construction is based on the fact that each component has the same
* sparsity structure as G, and the entries can be computed from the vectors
* Gx, Gy, Gz.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pix_ptr,
hypre_ParCSRMatrix **Piy_ptr,
hypre_ParCSRMatrix **Piz_ptr)
{
/* Note: A is not referenced here; it is kept in the signature for
symmetry with hypre_AMSComputePi/hypre_AMSComputeGPi.
When dim == 2, Piz is never created and *Piz_ptr is left untouched. */
hypre_ParCSRMatrix *Pix, *Piy, *Piz;
/* Compute Pix, Piy, Piz */
{
HYPRE_Int i, j;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
/* Each component Pi{x,y,z} has exactly G's sparsity pattern and
partitioning, so G's row/column starts are reused (not owned). */
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_Int global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(G);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
Pix = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Pix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
hypre_ParCSRMatrixInitialize(Pix);
Piy = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piy) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
hypre_ParCSRMatrixInitialize(Piy);
if (dim == 3)
{
Piz = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piz) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
hypre_ParCSRMatrixInitialize(Piz);
}
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
/* Gz is dereferenced only in 3D; it may be NULL when dim == 2. */
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part (3D branch: copy G's structure into all
three components, then fill data row by row; the post-incremented
data pointers walk the nnz entries in CSR order). */
if (dim == 3)
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
Piz_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
Piz_diag_J[i] = G_diag_J[i];
}
/* Entries: 0.5*|G_ij| times the row-i (edge-based) component of
Gx/Gy/Gz — same formula as in hypre_AMSComputePi, but split
into one matrix per component. */
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
*Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
else
/* 2D branch: identical to the above, minus the z component. */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
}
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
}
}
/* Fill-in the off-diagonal part (3D branch). */
if (dim == 3)
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);
HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_Int *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_Int *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
HYPRE_Int *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);
/* Row pointers are copied only if there are offd columns. */
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
Piz_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
Piz_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
*Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
/* Global column maps are identical to G's. */
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
Piz_cmap[i] = G_cmap[i];
}
}
else
/* Off-diagonal part, 2D branch: same minus z. */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_Int *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_Int *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
}
}
}
*Pix_ptr = Pix;
*Piy_ptr = Piy;
/* Piz exists only in 3D (it is uninitialized otherwise). */
if (dim == 3)
*Piz_ptr = Piz;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSComputeGPi
*
* Construct the matrix [G,Pi] which can be considered an interpolation
* matrix from S_h^4 (4 copies of the scalar linear finite element space)
* to the edge finite elements space.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **GPi_ptr)
{
/* Note: A is not referenced here; it is kept in the signature for
symmetry with the other Pi-construction functions. */
hypre_ParCSRMatrix *GPi;
/* Take into account G: below, dim counts the G block too, so it is
3 in 2D and 4 in 3D. */
dim++;
/* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */
{
HYPRE_Int i, j, d;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
/* GPi has G's rows and dim copies of G's columns: each G-column
expands into dim consecutive GPi-columns. */
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_Int global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_Int col_starts_size, *col_starts;
HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
HYPRE_Int *col_starts_G = hypre_ParCSRMatrixColStarts(G);
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* Assumed-partition mode: col_starts holds only the local range. */
col_starts_size = 2;
#else
/* Global-partition mode: one entry per process, plus one. */
HYPRE_Int num_procs;
hypre_MPI_Comm_size(comm, &num_procs);
col_starts_size = num_procs+1;
#endif
/* Scale G's column partition by dim; GPi takes ownership of this
array (OwnsColStarts = 1 below). */
col_starts = hypre_TAlloc(HYPRE_Int,col_starts_size);
for (i = 0; i < col_starts_size; i++)
col_starts[i] = dim * col_starts_G[i];
GPi = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(GPi) = 1;
hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0;
hypre_ParCSRMatrixOwnsColStarts(GPi) = 1;
hypre_ParCSRMatrixInitialize(GPi);
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
/* dim == 4 means a 3D problem (after the increment above); Gz may
be NULL in 2D and is only dereferenced here. */
if (dim == 4)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);
/* Each G nonzero expands to dim consecutive GPi nonzeros. */
for (i = 0; i < G_diag_nrows+1; i++)
GPi_diag_I[i] = dim * G_diag_I[i];
for (i = 0; i < G_diag_nnz; i++)
for (d = 0; d < dim; d++)
GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;
/* Per nonzero, the first entry is the G value itself, followed by
the Pi_x, Pi_y (and Pi_z in 3D) values — same 0.5*|G_ij|*G{x,y,z}
formula as in hypre_AMSComputePi. */
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*GPi_diag_data++ = G_diag_data[j];
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
if (dim == 4)
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
/* Fill-in the off-diagonal part */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);
HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_Int *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);
/* Row pointers are copied only if there are offd columns. */
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
GPi_offd_I[i] = dim * G_offd_I[i];
for (i = 0; i < G_offd_nnz; i++)
for (d = 0; d < dim; d++)
GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*GPi_offd_data++ = G_offd_data[j];
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
if (dim == 4)
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
/* Expand the global off-diagonal column map the same way as J. */
for (i = 0; i < G_offd_ncols; i++)
for (d = 0; d < dim; d++)
GPi_cmap[dim*i+d] = dim*G_cmap[i]+d;
}
}
*GPi_ptr = GPi;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetup
*
* Construct the AMS solver components.
*
* The following functions need to be called before hypre_AMSSetup():
* - hypre_AMSSetDimension() (if solving a 2D problem)
* - hypre_AMSSetDiscreteGradient()
* - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetup(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int input_info = 0;
ams_data -> A = A;
/* Modifications for problems with zero-conductivity regions */
if (ams_data -> interior_nodes)
{
hypre_ParCSRMatrix *G0t, *Aorig = A;
/* Make sure that multiple Setup()+Solve() give identical results */
ams_data -> solve_counter = 0;
/* Construct the discrete gradient matrix for the zero-conductivity region
by eliminating the zero-conductivity nodes from G^t. The range of G0
represents the kernel of A, i.e. the gradients of nodal basis functions
supported in zero-conductivity regions. */
hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1);
{
HYPRE_Int i, j;
HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G);
hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t);
HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td);
HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td);
hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t);
HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to);
HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to);
HYPRE_Real *interior_nodes_data=hypre_VectorData(
hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes));
for (i = 0; i < nv; i++)
{
if (interior_nodes_data[i] != 1)
{
for (j = G0tdI[i]; j < G0tdI[i+1]; j++)
G0tdA[j] = 0.0;
if (G0toI)
for (j = G0toI[i]; j < G0toI[i+1]; j++)
G0toA[j] = 0.0;
}
}
}
hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1);
/* Construct the subspace matrix A_G0 = G0^T G0 */
ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0);
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0);
/* Create AMG solver for A_G0 */
HYPRE_BoomerAMGCreate(&ams_data -> B_G0);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3);
HYPRE_BoomerAMGSetup(ams_data -> B_G0,
(HYPRE_ParCSRMatrix)ams_data -> A_G0,
0, 0);
/* Construct the preconditioner for ams_data->A = A + G0 G0^T.
NOTE: this can be optimized significantly by taking into account that
the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */
{
hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t);
hypre_ParCSRMatrix *B = Aorig;
hypre_ParCSRMatrix **C_ptr = &ams_data -> A;
hypre_ParCSRMatrix *C;
hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);
/* scale (penalize) G0 G0^T before adding it to the matrix */
{
HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local);
HYPRE_Real *data = hypre_CSRMatrixData(A_local);
HYPRE_Real *dataB = hypre_CSRMatrixData(B_local);
HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local);
HYPRE_Real factor, lfactor;
lfactor = -1;
for (i = 0; i < nnzB; i++)
if (fabs(dataB[i]) > lfactor)
lfactor = fabs(dataB[i]);
lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
for (i = 0; i < nnz; i++)
data[i] *= factor;
}
C_tmp = hypre_CSRMatrixAdd(A_local, B_local);
C_local = hypre_CSRMatrixDeleteZeros(C_tmp,0.0);
if (C_local)
hypre_CSRMatrixDestroy(C_tmp);
else
C_local = C_tmp;
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 1;
hypre_ParCSRMatrixOwnsColStarts(G0t) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
hypre_ParCSRMatrixDestroy(A);
*C_ptr = C;
}
hypre_ParCSRMatrixDestroy(G0t);
}
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */
/* Compute the l1 norm of the rows of A */
if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type,
NULL, &ams_data -> A_l1_norms);
/* Chebyshev? */
if (ams_data -> A_relax_type == 16)
{
hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
&ams_data->A_max_eig_est,
&ams_data->A_min_eig_est);
}
/* If not given, compute Gx, Gy and Gz */
{
if (ams_data -> x != NULL && ams_data -> y != NULL &&
(ams_data -> dim == 2 || ams_data -> z != NULL))
input_info = 1;
if (ams_data -> Gx != NULL && ams_data -> Gy != NULL &&
(ams_data -> dim == 2 || ams_data -> Gz != NULL))
input_info = 2;
if (input_info == 1)
{
ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
if (ams_data -> dim == 3)
{
ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
}
}
}
if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
{
if (ams_data -> cycle_type == 20)
hypre_AMSComputeGPi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
else if (ams_data -> cycle_type > 10)
/* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
hypre_AMSComputePixyz(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pix,
&ams_data -> Piy,
&ams_data -> Piz);
else
/* Construct the Pi interpolation matrix */
hypre_AMSComputePi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
}
/* Keep Gx, Gy and Gz only if use the method with discrete divergence
stabilization (where we use them to compute the local mesh size). */
if (input_info == 1 && ams_data -> cycle_type != 9)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
/* Create the AMG solver on the range of G^T */
if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20)
{
HYPRE_BoomerAMGCreate(&ams_data -> B_G);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2);
/* If not given, construct the coarse space matrix by RAP */
if (!ams_data -> A_G)
{
HYPRE_Int G_owned_col_starts;
if (!hypre_ParCSRMatrixCommPkg(ams_data -> G))
hypre_MatvecCommPkgCreate(ams_data -> G);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> G,
ams_data -> A,
ams_data -> G,
&ams_data -> A_G);
/* Make sure that A_G has no zero rows (this can happen
if beta is zero in part of the domain). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G);
hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts;
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0;
ams_data -> owns_A_G = 1;
}
HYPRE_BoomerAMGSetup(ams_data -> B_G,
(HYPRE_ParCSRMatrix)ams_data -> A_G,
0, 0);
}
if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20)
/* Create the AMG solvers on the range of Pi{x,y,z}^T */
{
HYPRE_Int P_owned_col_starts;
HYPRE_BoomerAMGCreate(&ams_data -> B_Pix);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piy);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piz);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2);
/* Generally, don't use exact solve on the coarsest level (matrices may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
{
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2);
}
/* Construct the coarse space matrices by RAP */
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix))
hypre_MatvecCommPkgCreate(ams_data -> Pix);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix,
ams_data -> A,
ams_data -> Pix,
&ams_data -> A_Pix);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0;
}
/* Make sure that A_Pix has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix);
HYPRE_BoomerAMGSetup(ams_data -> B_Pix,
(HYPRE_ParCSRMatrix)ams_data -> A_Pix,
0, 0);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy))
hypre_MatvecCommPkgCreate(ams_data -> Piy);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy,
ams_data -> A,
ams_data -> Piy,
&ams_data -> A_Piy);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0;
}
/* Make sure that A_Piy has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy);
HYPRE_BoomerAMGSetup(ams_data -> B_Piy,
(HYPRE_ParCSRMatrix)ams_data -> A_Piy,
0, 0);
if (ams_data -> Piz)
{
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz))
hypre_MatvecCommPkgCreate(ams_data -> Piz);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz,
ams_data -> A,
ams_data -> Piz,
&ams_data -> A_Piz);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0;
}
/* Make sure that A_Piz has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz);
HYPRE_BoomerAMGSetup(ams_data -> B_Piz,
(HYPRE_ParCSRMatrix)ams_data -> A_Piz,
0, 0);
}
}
else
/* Create the AMG solver on the range of Pi^T */
{
HYPRE_BoomerAMGCreate(&ams_data -> B_Pi);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2);
/* If not given, construct the coarse space matrix by RAP and
notify BoomerAMG that this is a dim x dim block system. */
if (!ams_data -> A_Pi)
{
HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi))
hypre_MatvecCommPkgCreate(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
if (ams_data -> cycle_type == 9)
{
/* Add a discrete divergence term to A before computing Pi^t A Pi */
{
hypre_ParCSRMatrix *Gt, *GGt, *ApGGt;
hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1);
hypre_ParCSRMatrixOwnsColStarts(Gt) = 0;
hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0;
/* scale GGt by h^2 */
{
HYPRE_Real h2;
HYPRE_Int i, j, k, ne;
hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt);
HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag);
HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag);
HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag);
HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag);
hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt);
HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd);
HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd);
HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx));
HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy));
HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz));
for (i = 0; i < Gt_num_rows; i++)
{
/* determine the characteristic mesh size for vertex i */
h2 = 0.0;
ne = 0;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
{
k = Gt_diag_J[j];
h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k];
ne++;
}
if (ne != 0)
{
h2 /= ne;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
Gt_diag_data[j] *= h2;
for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++)
Gt_offd_data[j] *= h2;
}
}
}
/* we only needed Gx, Gy and Gz to compute the local mesh size */
if (input_info == 1)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
GGt = hypre_ParMatmul(ams_data -> G, Gt);
hypre_ParCSRMatrixDestroy(Gt);
/* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */
{
hypre_ParCSRMatrix *A = GGt;
hypre_ParCSRMatrix *B = ams_data -> A;
hypre_ParCSRMatrix **C_ptr = &ApGGt;
hypre_ParCSRMatrix *C;
hypre_CSRMatrix *A_local, *B_local, *C_local;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);
C_local = hypre_CSRMatrixAdd(A_local, B_local);
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*C_ptr = C;
}
hypre_ParCSRMatrixDestroy(GGt);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ApGGt,
ams_data -> Pi,
&ams_data -> A_Pi);
}
}
else
{
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ams_data -> A,
ams_data -> Pi,
&ams_data -> A_Pi);
}
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0;
}
ams_data -> owns_A_Pi = 1;
if (ams_data -> cycle_type != 20)
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim);
else
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1);
/* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */
}
/* Make sure that A_Pi has no zero rows (this can happen for
some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);
HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
(HYPRE_ParCSRMatrix)ams_data -> A_Pi,
0, 0);
}
/* Allocate temporary vectors */
ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
if (ams_data -> A_G)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
}
if (ams_data -> r1 == NULL && ams_data -> A_Pix)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
}
if (ams_data -> Pi)
{
ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSolve
*
* Solve the system A x = b.
*--------------------------------------------------------------------------*/
/* Apply the AMS preconditioner/solver to A x = b.
 *
 * solver - AMS object created by hypre_AMSCreate and set up by hypre_AMSSetup
 * A      - the edge stiffness matrix (same as passed to hypre_AMSSetup)
 * b      - right-hand side (may be modified by the compatible projection below!)
 * x      - initial guess on input, computed approximation on output
 *
 * Returns hypre_error_flag; sets HYPRE_ERROR_CONV if maxit iterations were
 * performed without reaching tol > 0. */
HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   HYPRE_Int i, my_id = -1;
   /* NOTE(review): when maxit == 1, r_norm/r0_norm/b_norm are never computed
      and relative_resid stays 0, so the tol check below always passes on the
      first cycle — confirm this is the intended "single V-cycle" behavior. */
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;
   /* Cycle string interpreted by hypre_ParCSRSubspacePrec: '0' = fine-grid
      smoothing, '1'..'5' = subspace corrections, '(' saves the residual,
      '+' makes the next correction additive, ')' closes a group. */
   char cycle[30];
   /* Subspace data indexed by (cycle digit - 1):
      [0] = gradient space (G, A_G), [1] = full nodal space (Pi, A_Pi),
      [2]..[4] = componentwise nodal spaces (Pix/Piy/Piz). */
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];
   hypre_ParVector *z = NULL;
   Ai[0] = ams_data -> A_G; Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi; Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;
   /* B_Pi acts on a dim x dim block system, hence the block solve wrapper */
   Bi[0] = ams_data -> B_G; HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi; HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   /* Temporary subspace vectors; r1/g1 are shared by the scalar subspaces
      0, 2, 3, 4 (they are only used one at a time within a cycle). */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;
   /* may need to create an additional temporary vector for relaxation */
   if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      hypre_ParVectorSetPartitioningOwner(z,0);
   }
   if (ams_data -> print_level > 0)
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);
   /* Compatible subspace projection for problems with zero-conductivity regions.
      Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }
   /* Pick the cycle string for the requested cycle_type; the beta_is_zero
      variants omit the gradient-space correction '1'. */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","0");
            break;
         case 1:
         case 3:
         case 5:
         case 7:
         default:
            hypre_sprintf(cycle,"%s","020");
            break;
         case 2:
         case 4:
         case 6:
         case 8:
            hypre_sprintf(cycle,"%s","(0+2)");
            break;
         case 11:
         case 13:
            hypre_sprintf(cycle,"%s","0345430");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","010");
            break;
         case 1:
         default:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 2:
            hypre_sprintf(cycle,"%s","(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle,"%s","02120");
            break;
         case 4:
            hypre_sprintf(cycle,"%s","(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle,"%s","0102010");
            break;
         case 6:
            hypre_sprintf(cycle,"%s","(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle,"%s","0201020");
            break;
         case 8:
            hypre_sprintf(cycle,"%s","0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 11:
            hypre_sprintf(cycle,"%s","013454310");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle,"%s","034515430");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle,"%s","020");
            break;
      }
   }
   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms (only when iterating more than once,
         so that r_norm/b_norm are available for the convergence test). */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf(" relative\n");
            hypre_printf(" residual factor residual\n");
            hypre_printf(" -------- ------ --------\n");
            hypre_printf(" Initial %e %e\n",
                         r_norm, relative_resid);
         }
      }
      /* Apply the preconditioner (one full AMS cycle) */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle,
                               z);
      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf(" Cycle %2d %e %f %e \n",
                         i+1, r_norm, r_norm / old_resid, relative_resid);
      }
      if (relative_resid < ams_data -> tol)
      {
         i++; /* count the cycle that achieved convergence */
         break;
      }
   }
   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));
   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;
   /* Flag non-convergence only when a positive tolerance was requested */
   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
      hypre_error(HYPRE_ERROR_CONV);
   if (z)
      hypre_ParVectorDestroy(z);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRSubspacePrec
*
* General subspace preconditioner for A0 y = x, based on ParCSR storage.
*
* P[i] and A[i] are the interpolation and coarse grid matrices for
* the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
* are temporary vectors. A0_* are the fine grid smoothing parameters.
*
* The default mode is multiplicative, '+' changes the next correction
* to additive, based on residual computed at '('.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   char *pos;
   /* When set, the next subspace correction is additive: it restricts the
      residual saved at the last '(' instead of recomputing x - A0 y. */
   HYPRE_Int additive_pending = 0;

   /* Walk the cycle string, dispatching on each control character */
   for (pos = cycle; *pos != '\0'; pos++)
   {
      switch (*pos)
      {
         case ')':
            /* group terminator: no action */
            break;

         case '(':
            /* save the current residual: r0 = x - A0 y */
            hypre_ParVectorCopy(x,r0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
            break;

         case '+':
            /* switch the following correction to additive mode */
            additive_pending = 1;
            break;

         case '0':
            /* fine-grid smoothing: y += S (x - A0 y) */
            hypre_ParCSRRelax(A0, x,
                              A0_relax_type,
                              A0_relax_times,
                              A0_l1_norms,
                              A0_relax_weight,
                              A0_omega,
                              A0_max_eig_est,
                              A0_min_eig_est,
                              A0_cheby_order,
                              A0_cheby_fraction,
                              y, g0, z);
            break;

         default:
         {
            /* subspace correction: y += P[i] B[i]^{-1} P[i]^t r */
            HYPRE_Int i = *pos - '1';
            if (i < 0)
               hypre_error_in_arg(16);
            /* skip empty subspaces */
            if (!A[i])
               break;
            if (additive_pending)
            {
               /* additive: restrict the residual saved at '(' */
               additive_pending = 0;
               hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
            }
            else
            {
               /* multiplicative: restrict the current residual x - A0 y */
               hypre_ParVectorCopy(x,g0);
               hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
               hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
            }
            /* solve in the subspace (zero initial guess) and interpolate back */
            hypre_ParVectorSetConstantValues(g[i], 0.0);
            (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                      (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
            hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
            hypre_ParVectorAxpy(1.0, g0, y);
            break;
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetNumIterations
*
* Get the number of AMS iterations.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   /* Report the iteration count recorded by the last hypre_AMSSolve() call */
   *num_iterations = ((hypre_AMSData *) solver) -> num_iterations;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetFinalRelativeResidualNorm
*
* Get the final relative residual norm in AMS.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   /* Report the relative residual norm recorded by the last hypre_AMSSolve() call */
   *rel_resid_norm = ((hypre_AMSData *) solver) -> rel_resid_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSProjectOutGradients
*
* For problems with zero-conductivity regions, project the vector onto the
* compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
* discrete gradient restricted to the interior nodes of the regions with
* zero conductivity. This ensures that x is orthogonal to the gradients in
* the range of G0.
*
* This function is typically called after the solution iteration is complete,
* in order to facilitate the visualization of the computed field. Without it
* the values in the zero-conductivity regions contain kernel components.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* Without the interior-gradient solver there is nothing to project out */
   if (!ams_data -> B_G0)
      return hypre_error_flag;

   /* r1 = G0^t x */
   hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1);
   /* g1 ~ (G0^t G0)^{-1} r1, via the B_G0 AMG solver with zero initial guess */
   hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0);
   hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1);
   /* x -= G0 g1 */
   hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0);
   hypre_ParVectorAxpy(-1.0, ams_data -> g0, x);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSConstructDiscreteGradient
*
* Construct and return the lowest-order discrete gradient matrix G, based on:
* - a matrix on the edges (e.g. the stiffness matrix A)
* - a vector on the vertices (e.g. the x coordinates)
* - the array edge_vertex, which lists the global indexes of the
* vertices of the local edges.
*
* We assume that edge_vertex lists the edge vertices consecutively,
and that the orientation of all edges is consistent. More specifically:
* If edge_orientation = 1, the edges are already oriented.
* If edge_orientation = 2, the orientation of edge i depends only on the
* sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*--------------------------------------------------------------------------*/
/* Build the lowest-order discrete gradient matrix G (edges x vertices, with
 * entries -1/+1 per edge) and return it in *G_ptr.
 *
 * A                - edge-based matrix supplying the row (edge) partitioning
 * x_coord          - vertex-based vector supplying the column (vertex) partitioning
 * edge_vertex      - global vertex indices of each local edge, 2 per edge;
 *                    NOTE: this array is aliased as the J array of a temporary
 *                    CSR matrix below, but ownership stays with the caller
 * edge_orientation - 1: edges already oriented; 2: orient by vertex index order
 *                    (anything else raises an argument error)
 * G_ptr            - output: newly allocated matrix, owned by the caller */
HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             HYPRE_Int *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;
   nedges = hypre_ParCSRMatrixNumRows(A);
   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord */
   {
      /* Every edge row has exactly two entries, so I[i] = 2*i */
      HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1);
      HYPRE_Int part_size, *row_starts, *col_starts;
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2*nedges);
      for (i = 0; i <= nedges; i++)
         I[i] = 2*i;
      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2*nedges; i+=2)
         {
            data[i] = -1.0;
            data[i+1] = 1.0;
         }
      }
      else if (edge_orientation == 2)
      {
         /* Assume that the edge orientation is based on the vertex indexes */
         for (i = 0; i < 2*nedges; i+=2)
         {
            if (edge_vertex[i] < edge_vertex[i+1])
            {
               data[i] = -1.0;
               data[i+1] = 1.0;
            }
            else
            {
               data[i] = 1.0;
               data[i+1] = -1.0;
            }
         }
      }
      else
         hypre_error_in_arg(4);
      /* Assemble the temporary CSR matrix; J aliases the caller's edge_vertex */
      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;
      /* Copy partitioning from A and x_coord (previously they were re-used) */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      part_size = 2;
#else
      hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &part_size);
      part_size++;
#endif
      row_starts = hypre_TAlloc(HYPRE_Int,part_size);
      col_starts = hypre_TAlloc(HYPRE_Int,part_size);
      for (i = 0; i < part_size; i++)
      {
         row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
         col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
      }
      /* Generate the discrete gradient matrix; G takes ownership of the
         row_starts/col_starts copies made above */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   row_starts, col_starts, 0, 0, 0);
      hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));
      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }
      /* Free the local matrix; detach J first so the caller's edge_vertex
         array is not freed along with it */
      hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }
   *G_ptr = G;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEISetup
*
* Construct an AMS solver object based on the following data:
*
* A - the edge element stiffness matrix
* num_vert - number of vertices (nodes) in the processor
* num_local_vert - number of vertices owned by the processor
* vert_number - global indexes of the vertices in the processor
* vert_coord - coordinates of the vertices in the processor
* num_edges - number of edges owned by the processor
* edge_vertex - the vertices of the edges owned by the processor.
* Vertices are in local numbering (the same as in
* vert_number), and edge orientation is always from
* the first to the second vertex.
*
* Here we distinguish between vertices that belong to elements in the
* current processor, and the subset of these vertices that is owned by
* the processor.
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/
/* FEI-specific AMS setup: build the discrete gradient G and the vertex
 * coordinate vectors x/y/z from FEI mesh data and store them in ams_data.
 *
 * Note: b and x are unused here; edge_vertex is rewritten IN PLACE from
 * local to global vertex numbering. The memory allocated here (G and the
 * coordinate vectors) must later be released with hypre_AMSFEIDestroy(). */
HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_Int *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_Int *edge_vertex)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   HYPRE_Int i, j;
   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int *vert_part, num_global_vert;
   HYPRE_Int vert_start, vert_end;
   /* Find the processor partitioning of the vertices */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Two-entry partition [first owned vertex, one past last owned vertex] */
   vert_part = hypre_TAlloc(HYPRE_Int,2);
   hypre_MPI_Scan(&num_local_vert, &vert_part[1], 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - num_local_vert;
   hypre_MPI_Allreduce(&num_local_vert, &num_global_vert, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
#else
   /* Full partition array: prefix sums of the per-processor vertex counts */
   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(comm, &num_procs);
   vert_part = hypre_TAlloc(HYPRE_Int,num_procs+1);
   hypre_MPI_Allgather(&num_local_vert, 1, HYPRE_MPI_INT, &vert_part[1], 1, HYPRE_MPI_INT, comm);
   vert_part[0] = 0;
   for (i = 0; i < num_procs; i++)
      vert_part[i+1] += vert_part[i];
   num_global_vert = vert_part[num_procs];
#endif
   /* Construct hypre parallel vectors for the vertex coordinates.
      None of them owns vert_part: it is handed to G (col_starts) below,
      which is marked as its owner. */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   hypre_ParVectorOwnsPartitioning(x_coord) = 0;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));
   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   hypre_ParVectorOwnsPartitioning(y_coord) = 0;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));
   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   hypre_ParVectorOwnsPartitioning(z_coord) = 0;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));
   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);
   /* Save coordinates of locally owned vertices (vert_coord is packed
      as [x0,y0,z0, x1,y1,z1, ...]) */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         j = vert_number[i] - vert_start;
         x_data[j] = vert_coord[3*i];
         y_data[j] = vert_coord[3*i+1];
         z_data[j] = vert_coord[3*i+2];
      }
   }
   /* Change vertex numbers from local to global (modifies edge_vertex!) */
   for (i = 0; i < 2*num_edges; i++)
      edge_vertex[i] = vert_number[edge_vertex[i]];
   /* Construct the local part of G based on edge_vertex */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
      HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2*num_edges);
      /* Two entries per edge row */
      for (i = 0; i <= num_edges; i++)
         I[i] = 2*i;
      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2*num_edges; i+=2)
      {
         data[i] = 1.0;
         data[i+1] = -1.0;
      }
      /* Assemble the temporary CSR matrix; J aliases the caller's edge_vertex */
      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;
      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      /* Row starts belong to A; vert_part is owned (and later freed) by G */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      GenerateDiagAndOffd(local, G, vert_start, vert_end);
      /* Detach J so the caller's edge_vertex array is not freed */
      hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }
   ams_data -> G = G;
   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEIDestroy
*
* Free the additional memory allocated in hypre_AMSFEISetup().
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
if (ams_data -> G)
hypre_ParCSRMatrixDestroy(ams_data -> G);
if (ams_data -> x)
hypre_ParVectorDestroy(ams_data -> x);
if (ams_data -> y)
hypre_ParVectorDestroy(ams_data -> y);
if (ams_data -> z)
hypre_ParVectorDestroy(ams_data -> z);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1NormsThreads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
/* Compute per-row smoothing norms of A (see the option list in the header
 * comment above), partitioning the rows into num_threads contiguous chunks.
 *
 * Fix: in option 4, `diag` is now reset to 0.0 for every row. Previously it
 * was only assigned when the row's diagonal entry was visited (and, in the
 * CF case, only when it passed the cf_marker filter), so the truncation test
 * at the end of the row loop could read an uninitialized value or the value
 * left over from an earlier row. */
HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                            HYPRE_Int option,
                                            HYPRE_Int num_threads,
                                            HYPRE_Int *cf_marker,
                                            HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_CTAlloc(HYPRE_Real, num_rows);
   HYPRE_Int ii, ns, ne, rest, size;
   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;
   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;
      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      /* pack the cf_marker entries needed by neighboring processors */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < num_threads; k++)
   {
      /* rows [ns, ne) form the k-th thread's contiguous chunk; the first
         `rest` chunks get one extra row when num_rows % num_threads != 0 */
      size = num_rows/num_threads;
      rest = num_rows - size*num_threads;
      if (k < rest)
      {
         ns = k*size+k;
         ne = (k+1)*size+k+1;
      }
      else
      {
         ns = k*size+rest;
         ne = (k+1)*size+rest;
      }
      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                     l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 3)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            /* Reset the diagonal magnitude for this row so the truncation
               test below never sees a stale/uninitialized value when the
               row has no (CF-compatible) diagonal entry */
            diag = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0/3.0*diag)
               l1_norm[i] = diag;
         }
      }
      /* Handle negative definite matrices
         (assumes the diagonal entry is stored first in each row) */
      for (i = ns; i < ne; i++)
         if (A_diag_data[A_diag_I[i]] < 0)
            l1_norm[i] = -l1_norm[i];
      /* A zero norm would produce a division by zero in the smoother */
      for (i = ns; i < ne; i++)
         /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
         if (fabs(l1_norm[i]) == 0.0)
         {
            hypre_error_in_arg(1);
            break;
         }
   }
   hypre_TFree(cf_marker_offd);
   *l1_norm_ptr = l1_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelaxThreads
* 1 = l1-scaled Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelaxThreads(hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int relax_type,
HYPRE_Int relax_times,
HYPRE_Real *l1_norms,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *z)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data;
HYPRE_Real *v_buf_data;
HYPRE_Real *tmp_data;
HYPRE_Int i, j;
HYPRE_Int ii, jj;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, num_threads, my_id;
HYPRE_Real zero = 0.0;
HYPRE_Real res, res2;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/* only allow jacobi and GS */
if (relax_type > 2)
relax_type = 2;
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
if (relax_type == 1) /* Jacobi */
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (relax_weight*res)/l1_norms[i];
}
}
}
else if (relax_type == 2) /* GS */
{
if (relax_weight == 1 && omega == 1)
{
tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
hypre_TFree(tmp_data);
}
else
{
HYPRE_Real c1 = omega*relax_weight;
HYPRE_Real c2 = omega*(1.0-relax_weight);
tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
tmp_data[i] = u_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res2 = 0.0;
res = f_data[i];
Vtemp_data[i] = u_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
if (ii < i)
res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (c1*res + c2*res2) / l1_norms[i];
}
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
if (ii > i)
res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (c1*res + c2*res2) / l1_norms[i];
}
}
}
hypre_TFree(tmp_data);
}
} /* end of Jacobi or G.S. */
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
return(relax_error);
}
|
signalMachine.c | #include <getopt.h>
#include <string.h>
#include "signalMachineUtils.h"
#include "pairwiseAligner.h"
#define STEP 6 // space between degenerate nucleotides in for error correction
#define ESTIMATE_PARAMS 1
#define ASSIGNMENT_THRESHOLD 0.1
// Output modes for the aligned-pair TSV files (selected via --sparse_output).
// Numeric values are part of the CLI contract — do not renumber.
typedef enum {
    full = 0,          // one row per aligned (kmer, event) pair with full event/model statistics
    variantCaller = 1, // rows only for kmers that overlap degenerate (query) reference positions
    assignments = 2    // kmer/event assignment rows, used for HDP training
} OutputFormat;
// Print a short usage pointer to stderr; this binary is driven by the
// runSignalAlign wrapper and is not meant to be invoked by hand.
void usage() {
    fprintf(stderr,
            "signalMachine binary, meant to be used through the signalAlign program.\n"
            "See doc for runSignalAlign for help\n");
}
// Debug helper: dump the coordinates of a guide pairwise alignment via the
// sonLib debug logger (st_uglyf) — contig, strand, start and end for both
// sequences of the alignment.
void printPairwiseAlignmentSummary(struct PairwiseAlignment *pA) {
    st_uglyf("contig 1: %s\n", pA->contig1);
    st_uglyf("strand 1: %lld\n", pA->strand1);
    st_uglyf("start 1: %lld\n", pA->start1);
    st_uglyf("end 1: %lld\n", pA->end1);
    st_uglyf("contig 2: %s\n", pA->contig2);
    st_uglyf("strand 2: %lld\n", pA->strand2);
    st_uglyf("start 2: %lld\n", pA->start2);
    st_uglyf("end 2: %lld\n", pA->end2);
}
// Translate a trimmed-alignment reference coordinate x_i back into the
// coordinates of the full (forward) reference sequence.
inline int64_t adjustReferenceCoordinate(int64_t x_i, int64_t referenceSeqOffset,
                                         int64_t referenceLengthInKmers, int64_t referenceLength,
                                         Strand strand, bool forward) {
    // template/forward and complement/backward both read the reference
    // left-to-right: just shift by the trim offset
    bool sameOrientation = (strand == template && forward) || (strand == complement && !forward);
    if (sameOrientation) {
        return x_i + referenceSeqOffset;
    }
    // otherwise the alignment ran along the other strand: mirror the trimmed
    // coordinate back into the forward reference frame
    return referenceLengthInKmers - (x_i + (referenceLength - referenceSeqOffset));
}
// Return a freshly-allocated copy of k_i oriented to match the forward
// reference: kept as-is for template/forward and complement/backward
// pairings, reverse-complemented otherwise. Caller frees the result.
inline char *makeReferenceKmer(const char *k_i, Strand strand, bool forward) {
    bool sameOrientation = (strand == template && forward) || (strand == complement && !forward);
    return sameOrientation ? stString_copy(k_i)
                           : stString_reverseComplementString(k_i);
}
// Copy the kmerLength-character substring of `string` starting at `start`
// into a freshly-allocated, NUL-terminated buffer. Caller frees the result.
inline char *kmerFromString(const char *string, int64_t start, int64_t kmerLength) {
    // Allocate kmerLength + 1 bytes: the original allocated only kmerLength
    // and then wrote k_i[kmerLength] = '\0', a one-byte heap overflow.
    char *k_i = st_malloc((kmerLength + 1) * sizeof(char));
    for (int64_t i = 0; i < kmerLength; i++) {
        k_i[i] = *(string + (start + i));
    }
    k_i[kmerLength] = '\0';
    return k_i;
}
// Map a degenerate-position index within the reference kmer onto the
// corresponding index in the path kmer, mirroring the index when the kmer
// was reverse-complemented for output.
inline int64_t adjustQueryPosition(int64_t unadjustedQueryPosition, int64_t kmerLength, Strand strand, bool forward) {
    bool sameOrientation = (strand == template && forward) || (strand == complement && !forward);
    return sameOrientation ? unadjustedQueryPosition
                           : (kmerLength - 1) - unadjustedQueryPosition;
}
/*
 * Write the "full" output format: appends one TSV row per aligned
 * (kmer, event) pair to posteriorProbsFile, including observed event
 * statistics and the model's expected (scaled and descaled) levels.
 *
 * eventSequenceOffset / referenceSequenceOffset translate the (0,0)-based
 * alignment coordinates back into whole-read / whole-reference coordinates.
 */
void writePosteriorProbsFull(char *posteriorProbsFile, char *readLabel, StateMachine *sM,
                             NanoporeReadAdjustmentParameters npp, double *events, char *target, bool forward,
                             char *contig, int64_t eventSequenceOffset, int64_t referenceSequenceOffset,
                             stList *alignedPairs, Strand strand) {
    // label for tsv output
    char *strandLabel = strand == template ? "t" : "c";
    // open the file for output (append mode: template and complement rows share one file)
    FILE *fH = fopen(posteriorProbsFile, "a");
    if (fH == NULL) {
        // previously unchecked: fprintf on a NULL stream is undefined behavior
        st_errAbort("signalAlign - ERROR: couldn't open output file %s\n", posteriorProbsFile);
    }
    // get some lengths outside the loop
    int64_t refLength = (int64_t )strlen(target);
    int64_t refLengthInKmers = refLength - sM->kmerLength;
    for(int64_t i = 0; i < stList_length(alignedPairs); i++) {
        // grab the aligned pair
        stIntTuple *aPair = stList_get(alignedPairs, i);
        if (stIntTuple_length(aPair) != 4) {
            st_errAbort("Aligned pair tuples should have length 4, this one has length %lld\n",
                        stIntTuple_length(aPair));
        }
        // nucleotide sequence coordinate
        int64_t x_i = stIntTuple_get(aPair, 1);
        // adjust back to reference coordinates
        int64_t x_adj = adjustReferenceCoordinate(x_i, referenceSequenceOffset, refLengthInKmers, refLength,
                                                  strand, forward);
        // event index, adjust to entire event sequence coordinates (event sequence is trimmed during alignment)
        int64_t y = stIntTuple_get(aPair, 2) + eventSequenceOffset;
        // posterior probability
        double p = ((double)stIntTuple_get(aPair, 0)) / PAIR_ALIGNMENT_PROB_1;
        // path (variant-called) kmer; owned by the tuple, not freed here
        char *pathKmer = (char *)stIntTuple_get(aPair, 3);
        // observed event statistics
        double eventMean = sequence_getEventMean(events, y);
        double eventNoise = sequence_getEventNoise(events, y);
        double eventDuration = sequence_getEventDuration(events, y);
        // make the kmer string at the target index
        char *k_i = kmerFromString(target, x_i, sM->kmerLength);
        int64_t targetKmerIndex = kmer_id(pathKmer, sM->alphabet, sM->alphabetSize, sM->kmerLength);
        // get the expected event mean amplitude and noise from the model
        double E_mean = sM->EMISSION_MATCH_MATRIX[(targetKmerIndex * MODEL_PARAMS)];
        double E_noise = sM->EMISSION_MATCH_MATRIX[(targetKmerIndex * MODEL_PARAMS + 2)];
        double scaled_Emean = E_mean * npp.scale + npp.shift;
        double scaled_Enoise = E_noise * npp.scale_sd;
        double descaledEventMean = emissions_signal_descaleEventMean_JordanStyle(eventMean, E_mean,
                                                                                 npp.scale, npp.shift, npp.var);
        // make reference kmer (oriented to the forward reference)
        char *refKmer = makeReferenceKmer(k_i, strand, forward);
        // write to file
        fprintf(fH, "%s\t%"PRId64"\t%s\t%s\t%s\t%"PRId64"\t%f\t%f\t%f\t%s\t%f\t%f\t%f\t%f\t%f\t%s\n",
                contig, x_adj, refKmer, readLabel, strandLabel, y, eventMean, eventNoise, eventDuration, k_i,
                scaled_Emean, scaled_Enoise, p, descaledEventMean, E_mean, pathKmer);
        // cleanup
        free(k_i);
        free(refKmer);
    }
    fclose(fH);
}
/*
 * Write the "variantCaller" output format: appends one TSV row per
 * degenerate (query) position covered by each aligned pair. Pairs whose
 * reference kmer contains no degenerate position are skipped.
 */
void writePosteriorProbsVC(char *posteriorProbsFile, char *readLabel, StateMachine *sM, char *target, bool forward,
                           int64_t eventSequenceOffset, int64_t referenceSequenceOffset, stList *alignedPairs,
                           Strand strand) {
    // labels for tsv output
    char *strandLabel = strand == template ? "t" : "c";
    char *forwardLabel = forward ? "forward" : "backward";
    // open the file for output (append mode)
    FILE *fH = fopen(posteriorProbsFile, "a");
    if (fH == NULL) {
        // previously unchecked: fprintf on a NULL stream is undefined behavior
        st_errAbort("signalAlign - ERROR: couldn't open output file %s\n", posteriorProbsFile);
    }
    // get some lengths outside the loop
    int64_t refLength = (int64_t )strlen(target);
    int64_t refLengthInKmers = refLength - sM->kmerLength;
    for(int64_t i = 0; i < stList_length(alignedPairs); i++) {
        // grab the aligned pair
        stIntTuple *aPair = stList_get(alignedPairs, i);
        if (stIntTuple_length(aPair) != 4) {
            st_errAbort("Aligned pair tuples should have length 4, this one has length %lld\n",
                        stIntTuple_length(aPair));
        }
        // trimmed nucleotide sequence coordinate
        int64_t x_i = stIntTuple_get(aPair, 1);
        // make the kmer string at the target index, oriented to the reference
        char *k_i = kmerFromString(target, x_i, sM->kmerLength);
        char *refKmer = makeReferenceKmer(k_i, strand, forward);
        stList *queryPositions = path_findDegeneratePositions(refKmer, sM->kmerLength);
        // check if this aligned pair reports on a query position; skip if not
        if (stList_length(queryPositions) == 0) {
            free(k_i);
            free(refKmer);
            stList_destruct(queryPositions);
            continue;
        }
        // adjust back to reference coordinates
        int64_t x_adj = adjustReferenceCoordinate(x_i, referenceSequenceOffset, refLengthInKmers, refLength,
                                                  strand, forward);
        // event index, adjust to entire event sequence coordinates (event sequence is trimmed during alignment)
        int64_t y = stIntTuple_get(aPair, 2) + eventSequenceOffset;
        // posterior probability
        double p = ((double)stIntTuple_get(aPair, 0)) / PAIR_ALIGNMENT_PROB_1;
        // path (variant-called) kmer; owned by the tuple, not freed here
        char *pathKmer = (char *)stIntTuple_get(aPair, 3);
        // emit one row per degenerate position in this kmer
        int64_t nQueryPositions = stList_length(queryPositions);
        for (int64_t q = 0; q < nQueryPositions; q++) {
            // position in the reference kmer eg. AGXGG -> 2
            int64_t unadjustedQueryPosition = *(int64_t *)stList_get(queryPositions, q);
            // position in the pathKmer (mirrored if the kmer was reverse-complemented)
            int64_t queryPosition = adjustQueryPosition(unadjustedQueryPosition, sM->kmerLength,
                                                        strand, forward);
            // called base
            char base = pathKmer[queryPosition];
            // position in the reference we're reporting on
            int64_t reportPosition = x_adj + unadjustedQueryPosition;
            fprintf(fH, "%"PRId64"\t%"PRId64"\t%c\t%f\t%s\t%s\t%s\n", y, reportPosition, base, p,
                    strandLabel, forwardLabel, readLabel);
        }
        free(k_i);
        free(refKmer);
        stList_destruct(queryPositions);
    }
    fclose(fH);
}
/*
 * Write the "assignments" output format: appends one TSV row per aligned
 * pair containing the path kmer, strand label, descaled event mean, and
 * posterior probability — the input rows used for HDP training.
 */
void writeAssignments(char *posteriorProbsFile, StateMachine *sM, double *events, int64_t eventSequenceOffset,
                      NanoporeReadAdjustmentParameters npp, stList *alignedPairs, Strand strand) {
    // label for tsv output
    char *strandLabel = strand == template ? "t" : "c";
    // open the file for output (append mode)
    FILE *fH = fopen(posteriorProbsFile, "a");
    if (fH == NULL) {
        // previously unchecked: fprintf on a NULL stream is undefined behavior
        st_errAbort("signalAlign - ERROR: couldn't open output file %s\n", posteriorProbsFile);
    }
    for(int64_t i = 0; i < stList_length(alignedPairs); i++) {
        // grab the aligned pair
        stIntTuple *aPair = stList_get(alignedPairs, i);
        if (stIntTuple_length(aPair) != 4) {
            st_errAbort("Aligned pair tuples should have length 4, this one has length %lld\n",
                        stIntTuple_length(aPair));
        }
        // event index, adjust to entire event sequence coordinates (event sequence is trimmed during alignment)
        int64_t y = stIntTuple_get(aPair, 2) + eventSequenceOffset;
        // posterior probability
        double p = ((double)stIntTuple_get(aPair, 0)) / PAIR_ALIGNMENT_PROB_1;
        // path (variant-called) kmer; owned by the tuple
        char *pathKmer = (char *)stIntTuple_get(aPair, 3);
        // get the observed event mean
        double eventMean = sequence_getEventMean(events, y);
        // get the kmer index
        int64_t targetKmerIndex = kmer_id(pathKmer, sM->alphabet, sM->alphabetSize, sM->kmerLength);
        // get the expected mean from the model
        double E_mean = sM->EMISSION_MATCH_MATRIX[(targetKmerIndex * MODEL_PARAMS)];
        // descale the observed mean so assignments are comparable across reads
        double descaledEventMean = emissions_signal_descaleEventMean_JordanStyle(eventMean, E_mean,
                                                                                 npp.scale, npp.shift, npp.var);
        fprintf(fH, "%s\t%s\t%lf\t%lf\n", pathKmer, strandLabel, descaledEventMean, p);
    }
    fclose(fH);
}
// Dispatch the aligned pairs to the writer matching the requested output
// format; unknown formats are reported on stderr and nothing is written.
void outputAlignment(OutputFormat fmt,
                     char *posteriorProbsFile, char *readLabel, StateMachine *sM, NanoporeReadAdjustmentParameters npp,
                     double *events, char *target, bool forward, char *contig, int64_t eventSequenceOffset,
                     int64_t referenceSequenceOffset, stList *alignedPairs, Strand strand) {
    if (fmt == full) {
        writePosteriorProbsFull(posteriorProbsFile, readLabel, sM, npp, events, target, forward, contig,
                                eventSequenceOffset, referenceSequenceOffset, alignedPairs, strand);
    } else if (fmt == variantCaller) {
        writePosteriorProbsVC(posteriorProbsFile, readLabel, sM, target, forward, eventSequenceOffset,
                              referenceSequenceOffset, alignedPairs, strand);
    } else if (fmt == assignments) {
        writeAssignments(posteriorProbsFile, sM, events, eventSequenceOffset, npp, alignedPairs, strand);
    } else {
        fprintf(stderr, "signalAlign - No valid output format provided\n");
    }
}
/*
 * Construct a StateMachine of the requested type from a pore-model file.
 * Aborts (never returns) for unsupported types or a missing model file.
 *
 * Restructured from the original, which had an unreachable error branch and
 * a dead `return 0;` that returned the integer 0 as a pointer.
 */
StateMachine *buildStateMachine(const char *modelFile, NanoporeReadAdjustmentParameters npp, StateMachineType type,
                                NanoporeHDP *nHdp) {
    if ((type != threeState) && (type != threeStateHdp)) {
        st_errAbort("signalAlign - incompatible stateMachine type request");
    }
    if (!stFile_exists(modelFile)) {
        st_errAbort("signalAlign - ERROR: couldn't find model file here: %s\n", modelFile);
    }
    if (type == threeState) {
        return getStateMachine3_descaled(modelFile, npp, !ESTIMATE_PARAMS);
    }
    // type == threeStateHdp (guaranteed by the guard above)
    return getHdpStateMachine(nHdp, modelFile, npp);
}
// Load a trained signal HMM from hmmFile into the state machine, validating
// the state machine type first. (Currently unused by main; kept for the
// buildStateMachineAndLoadHmm path.)
inline void loadHmmRoutine(const char *hmmFile, StateMachine *sM, StateMachineType type, Hmm *expectations) {
    if ((type != threeState) && (type != threeStateHdp)) {
        // fixed typo in the abort message: "unupported" -> "unsupported"
        st_errAbort("LoadSignalHmm : unsupported stateMachineType");
    }
    hmmContinuous_loadSignalHmm(hmmFile, sM, type, expectations);
}
// Thin wrapper around buildStateMachine. Historically this also loaded a
// trained .hmm into the state machine, but the model file now carries both
// the transitions and the event model, so that extra load step is gone.
StateMachine *buildStateMachineAndLoadHmm(const char *modelFile, NanoporeReadAdjustmentParameters npp,
                                          StateMachineType type, NanoporeHDP *nHdp) {
    return buildStateMachine(modelFile, npp, type, nHdp);
}
// Update a serialized NanoporeHDP with newly-collected assignments, re-run
// Gibbs sampling, and serialize the result to nHdpOutFile.
//
//   nHdpFile         - path to the serialized NanoporeHDP to update
//   expectationsFile - HDP-HMM expectations file holding the assignments
//   nHdpOutFile      - destination for the updated, serialized HDP
void updateHdpFromAssignments(const char *nHdpFile, const char *expectationsFile, const char *nHdpOutFile) {
    NanoporeHDP *nHdp = deserialize_nhdp(nHdpFile);
    // NOTE(review): the loaded Hmm is destructed immediately — presumably
    // hdpHmm_loadFromFile registers the assignments with nHdp as a side
    // effect; confirm against hdpHmm_loadFromFile before refactoring.
    Hmm *hdpHmm = hdpHmm_loadFromFile(expectationsFile, threeStateHdp, nHdp);
    hmmContinuous_destruct(hdpHmm, hdpHmm->type);
    fprintf(stderr, "signalAlign - Running Gibbs on HDP\n");
    // sampling-schedule argument meanings assumed from the call site — TODO
    // confirm against execute_nhdp_gibbs_sampling's signature
    execute_nhdp_gibbs_sampling(nHdp, 10000, 100000, 100, FALSE);
    finalize_nhdp_distributions(nHdp);
    fprintf(stderr, "signalAlign - Serializing HDP to %s\n", nHdpOutFile);
    serialize_nhdp(nHdp, nHdpOutFile);
    destroy_nanopore_hdp(nHdp);
}
// Sum of the (integer-scaled) posterior probabilities over all aligned pairs.
static double totalScore(stList *alignedPairs) {
    double accumulated = 0.0;
    int64_t nPairs = stList_length(alignedPairs);
    for (int64_t idx = 0; idx < nPairs; ++idx) {
        stIntTuple *pair = stList_get(alignedPairs, idx);
        accumulated += stIntTuple_get(pair, 0);
    }
    return accumulated;
}
/*
 * Gives the average posterior match probability per base of the two
 * sequences, ignoring indels, as a percentage.
 *
 * Returns 0.0 for an empty alignment: the original divided by
 * stList_length(alignedPairs) unconditionally, producing NaN (0.0/0.0)
 * when no pairs were aligned.
 */
double scoreByPosteriorProbabilityIgnoringGaps(stList *alignedPairs) {
    int64_t nPairs = stList_length(alignedPairs);
    if (nPairs == 0) {
        return 0.0;
    }
    return 100.0 * totalScore(alignedPairs) / ((double) nPairs * PAIR_ALIGNMENT_PROB_1);
}
/*
 * Align an event sequence to a reference nucleotide sequence using a banded
 * HMM alignment constrained by anchor pairs from the guide alignment.
 * Returns a list of stIntTuples (prob, refCoord, eventCoord, pathKmer);
 * caller owns the returned list. Aborts on an unsupported stateMachine type.
 */
stList *performSignalAlignment(StateMachine *sM, Sequence *eventSequence, int64_t *eventMap,
                               int64_t mapOffset, char *target, PairwiseAlignmentParameters *p,
                               stList *unmappedAnchors, DegenerateType degenerate) {
    if ((sM->type != threeState) && (sM->type != threeStateHdp)) {
        // fixed typo in the abort message: "king" -> "kind"
        st_errAbort("signalAlign - You're trying to do the wrong kind of alignment");
    }
    int64_t lX = sequence_correctSeqLength(strlen(target), kmer, sM->kmerLength);
    // remap anchor pairs from read coordinates onto the trimmed event sequence
    stList *filteredRemappedAnchors = signalUtils_getRemappedAnchorPairs(unmappedAnchors, eventMap, mapOffset);
    // make the reference kmer sequence
    Sequence *sX = sequence_constructReferenceKmerSequence(lX, target, sequence_getKmer,
                                                           sequence_sliceNucleotideSequence, degenerate, kmer);
    // do alignment
    stList *alignedPairs = getAlignedPairsUsingAnchors(sM, sX, eventSequence, filteredRemappedAnchors, p,
                                                       diagonalCalculationPosteriorMatchProbs, 1, 1);
    return alignedPairs;
}
// Build an event Sequence restricted to the window of events that the guide
// alignment maps to [queryStart, queryEnd] in read coordinates.
Sequence *makeEventSequenceFromPairwiseAlignment(double *events, int64_t queryStart, int64_t queryEnd,
                                                 int64_t *eventMap) {
    // events mapped to the two ends of the 2D read alignment
    int64_t firstEvent = eventMap[queryStart];
    int64_t lastEvent = eventMap[queryEnd];
    // skip the NB_EVENT_PARAMS doubles of every event preceding the window
    double *windowStart = events + firstEvent * NB_EVENT_PARAMS;
    // make the eventSequence over just that window
    return sequence_constructEventSequence(lastEvent - firstEvent, windowStart);
}
// Run the expectations pass for one strand: aligns the event sequence to the
// training target with anchored banding and accumulates transition/emission
// expectations into hmmExpectations.
void getSignalExpectations(StateMachine *sM, Hmm *hmmExpectations, Sequence *eventSequence,
                           int64_t *eventMap, int64_t mapOffset, char *trainingTarget, PairwiseAlignmentParameters *p,
                           stList *unmappedAnchors, DegenerateType degenerate) {
    // correct sequence length
    // NOTE(review): this passes `event` where performSignalAlignment passes
    // `kmer` for its nucleotide target — confirm `event` is intended here.
    int64_t lX = sequence_correctSeqLength(strlen(trainingTarget), event, sM->kmerLength);
    // remap the anchors from read coordinates onto the trimmed event sequence
    stList *filteredRemappedAnchors = signalUtils_getRemappedAnchorPairs(unmappedAnchors, eventMap, mapOffset);
    // pick the degenerate-nucleotide alphabet and its size to match the
    // requested variant set (canonical, two-cytosine, or three-cytosine)
    Sequence *target = sequence_constructKmerSequence(
            lX, trainingTarget, sequence_getKmer, sequence_sliceNucleotideSequence,
            (degenerate == canonicalVariants ? CANONICAL_NUCLEOTIDES :
             (degenerate == cytosineMethylation2 ? TWO_CYTOSINES : THREE_CYTOSINES)),
            (degenerate == canonicalVariants ? NB_CANONICAL_BASES :
             (degenerate == cytosineMethylation2 ? (NB_CYTOSINE_OPTIONS - 1) : NB_CYTOSINE_OPTIONS)),
            kmer);
    getExpectationsUsingAnchors(sM, hmmExpectations, target, eventSequence, filteredRemappedAnchors, p,
                                diagonalCalculation_Expectations, 1, 1);
}
/*
 * signalMachine entry point. Reads a guide alignment (exonerate CIGAR) from
 * stdin, loads a nanopore read and pore model(s), then either:
 *   - collects HMM/HDP expectations (training mode) when an expectations
 *     output file is given, or
 *   - performs the signal-level alignment and writes aligned pairs in the
 *     requested output format (alignment mode).
 *
 * Fixes vs. the original:
 *   - `degenerate` and `outFmt` were read while uninitialized when
 *     --degenerate / --sparse_output were not passed (UB); now zero-initialized.
 *   - `--sparse_output` was declared no_argument although its handler parses
 *     optarg, so using the long form passed NULL to sscanf.
 *   - The short optstring disagreed with the long-options table (flags h/d/e
 *     demanded arguments, duplicate "p:", unhandled "a:").
 *   - Two log format strings were missing a space after the PRId64 field.
 */
int main(int argc, char *argv[]) {
    StateMachineType sMtype = threeState;
    int64_t j = 0;
    int64_t diagExpansion = 50;
    double threshold = 0.01;
    int64_t constraintTrim = 14;
    int64_t degenerate = 0;  // was uninitialized; used even without --degenerate
    int64_t outFmt = 0;      // was uninitialized; defaults to the `full` format
    bool twoD = FALSE;
    char *templateModelFile = NULL;
    char *complementModelFile = NULL;
    char *readLabel = NULL;
    char *npReadFile = NULL;
    char *forwardReference = NULL;
    char *backwardReference = NULL;
    char *errorCorrectPath = NULL;
    char *posteriorProbsFile = NULL;
    char *templateExpectationsFile = NULL;
    char *complementExpectationsFile = NULL;
    char *templateHdp = NULL;
    char *complementHdp = NULL;
    int key;
    while (1) {
        static struct option long_options[] = {
                {"help", no_argument, 0, 'h'},
                {"sm3Hdp", no_argument, 0, 'd'},
                // requires the output-format number (was no_argument, which
                // handed a NULL optarg to the sscanf in case 's')
                {"sparse_output", required_argument, 0, 's'},
                {"twoD", no_argument, 0, 'e'},
                {"degenerate", required_argument, 0, 'o'},
                {"templateModel", required_argument, 0, 'T'},
                {"complementModel", required_argument, 0, 'C'},
                {"readLabel", required_argument, 0, 'L'},
                {"npRead", required_argument, 0, 'q'},
                {"forward_reference", required_argument, 0, 'f'},
                {"backward_reference", required_argument, 0, 'b'},
                {"error_correct_path", required_argument, 0, 'p'},
                {"posteriors", required_argument, 0, 'u'},
                {"templateHdp", required_argument, 0, 'v'},
                {"complementHdp", required_argument, 0, 'w'},
                {"templateExpectations", required_argument, 0, 't'},
                {"complementExpectations", required_argument, 0, 'c'},
                {"diagonalExpansion", required_argument, 0, 'x'},
                {"threshold", required_argument, 0, 'D'},
                {"constraintTrim", required_argument, 0, 'm'},
                {0, 0, 0, 0} };
        int option_index = 0;
        // optstring now mirrors the long_options table: h/d/e are flags,
        // everything else takes a required argument
        key = getopt_long(argc, argv, "hdes:o:p:T:C:L:q:f:b:u:v:w:t:c:x:D:m:",
                          long_options, &option_index);
        if (key == -1) {
            //usage();
            break;
        }
        switch (key) {
            case 'h':
                usage();
                return 1;
            case 's':
                j = sscanf(optarg, "%" PRIi64 "", &outFmt);
                assert (j == 1);
                break;
            case 'e':
                twoD = TRUE;
                break;
            case 'o':
                j = sscanf(optarg, "%" PRIi64 "", &degenerate);
                assert (j == 1);
                break;
            case 'd':
                sMtype = threeStateHdp;
                break;
            case 'T':
                templateModelFile = stString_copy(optarg);
                break;
            case 'C':
                complementModelFile = stString_copy(optarg);
                break;
            case 'L':
                readLabel = stString_copy(optarg);
                break;
            case 'q':
                npReadFile = stString_copy(optarg);
                break;
            case 'f':
                forwardReference = stString_copy(optarg);
                break;
            case 'b':
                backwardReference = stString_copy(optarg);
                break;
            case 'p':
                errorCorrectPath = stString_copy(optarg);
                break;
            case 'u':
                posteriorProbsFile = stString_copy(optarg);
                break;
            case 't':
                templateExpectationsFile = stString_copy(optarg);
                break;
            case 'c':
                complementExpectationsFile = stString_copy(optarg);
                break;
            case 'v':
                templateHdp = stString_copy(optarg);
                break;
            case 'w':
                complementHdp = stString_copy(optarg);
                break;
            case 'x':
                j = sscanf(optarg, "%" PRIi64 "", &diagExpansion);
                assert (j == 1);
                assert (diagExpansion >= 0);
                break;
            case 'D':
                j = sscanf(optarg, "%lf", &threshold);
                assert (j == 1);
                assert (threshold >= 0);
                break;
            case 'm':
                j = sscanf(optarg, "%" PRIi64 "", &constraintTrim);
                assert (j == 1);
                assert (constraintTrim >= 0);
                break;
            default:
                usage();
                return 1;
        }
    }
    (void) j;  // silence unused variable warning.

    // check for models
    if ((templateModelFile == NULL) || (complementModelFile == NULL && twoD)) {
        st_errAbort("Missing model files, exiting\n");
        return 1;
    }

    // Anchors //
    // get pairwise alignment from stdin, in exonerate CIGAR format
    FILE *fileHandleIn = stdin;
    // parse input CIGAR to get anchors
    struct PairwiseAlignment *pA;
    pA = cigarRead(fileHandleIn);

    // Alignment Parameters //
    // make the pairwise alignment parameters
    PairwiseAlignmentParameters *p = pairwiseAlignmentBandingParameters_construct();
    p->threshold = threshold;
    p->constraintDiagonalTrim = constraintTrim;
    p->diagonalExpansion = diagExpansion;

    // HDP routines //
    // load HDPs
    NanoporeHDP *nHdpT, *nHdpC;
    // check
    if ((templateHdp != NULL) || (complementHdp != NULL)) {
        if ((templateHdp == NULL) || (complementHdp == NULL && twoD)) {
            st_errAbort("Need to have template and complement HDPs");
        }
        if (sMtype != threeStateHdp) {
            fprintf(stderr, "[signalAlign] - Warning: this kind of stateMachine does not use the HDPs you gave\n");
        }
        fprintf(stderr, "[signalAlign] - using NanoporeHDPs\n");
    }

    // deserialize both HDPs concurrently (the first structured block is the
    // implicit first section of the sections construct)
#pragma omp parallel sections
    {
        {
            nHdpT = (templateHdp == NULL) ? NULL : deserialize_nhdp(templateHdp);
        }
#pragma omp section
        {
            nHdpC = (complementHdp == NULL) ? NULL : deserialize_nhdp(complementHdp);
        }
    }

    ReferenceSequence *R;
    if (errorCorrectPath == NULL) {  // not doing error correction
        if ((forwardReference == NULL) || (backwardReference == NULL)) {
            st_errAbort("[signalAlign] - ERROR: did not get reference files %s %s\n",
                        forwardReference, backwardReference);
        }
        R = signalUtils_ReferenceSequenceConstructFull(forwardReference, backwardReference, pA);
    } else {
        R = signalUtils_ReferenceSequenceConstructEmpty(pA);
    }

    // Nanopore Read //
    // load nanopore read
    NanoporeRead *npRead = nanopore_loadNanoporeReadFromFile(npReadFile);

    // constrain the event sequence to the positions given by the guide alignment
    Sequence *tEventSequence = makeEventSequenceFromPairwiseAlignment(npRead->templateEvents,
                                                                      pA->start2, pA->end2,
                                                                      (twoD ? npRead->templateEventMap :
                                                                       npRead->templateStrandEventMap));
    Sequence *cEventSequence;
    if (twoD) {
        cEventSequence = makeEventSequenceFromPairwiseAlignment(npRead->complementEvents,
                                                                pA->start2, pA->end2,
                                                                npRead->complementEventMap);
    } else {
        cEventSequence = NULL;
    }

    // the aligned pairs start at (0,0) so we need to correct them based on the guide alignment later.
    // record the pre-zeroed alignment start and end coordinates here
    // for the events:
    int64_t tCoordinateShift = twoD ? npRead->templateEventMap[pA->start2] : npRead->templateStrandEventMap[pA->start2];
    int64_t cCoordinateShift = twoD ? npRead->complementEventMap[pA->start2] : 0;
    // and for the reference:
    int64_t rCoordinateShift_t = pA->start1;
    int64_t rCoordinateShift_c = twoD ? pA->end1 : 0;
    bool forward = pA->strand1;  // keep track of whether this is a forward mapped read or not

    stList *anchorPairs = signalUtils_guideAlignmentToRebasedAnchorPairs(pA, p);  // pA gets modified here, no turning back

    if ((templateExpectationsFile != NULL) || (complementExpectationsFile != NULL)) {
        st_uglyf("Starting expectations routine\n");
        // Expectation Routine //
        StateMachine *sMt = buildStateMachine(templateModelFile, npRead->templateParams, sMtype, nHdpT);
        // temporary way to 'turn off' estimates if I want to
        if (ESTIMATE_PARAMS) {  // todo remove threshold, not used
            signalUtils_estimateNanoporeParams(sMt, npRead, &npRead->templateParams, ASSIGNMENT_THRESHOLD,
                                               signalUtils_templateOneDAssignmentsFromRead,
                                               nanopore_adjustTemplateEventsForDrift);
        }
        // make empty HMM to collect expectations
        Hmm *templateExpectations = hmmContinuous_getExpectationsHmm(sMt, p->threshold, 0.001, 0.001);
        // get expectations for template
        fprintf(stderr, "signalAlign - getting expectations for template\n");
        getSignalExpectations(sMt, templateExpectations, tEventSequence,
                              (twoD ? npRead->templateEventMap : npRead->templateStrandEventMap),
                              pA->start2,
                              R->getTemplateTargetSequence(R),
                              p, anchorPairs, degenerate);
        if (sMtype == threeStateHdp) {
            fprintf(stderr, "signalAlign - got %" PRId64 " template HDP assignments\n",
                    hmmContinuous_howManyAssignments(templateExpectations));
        }
        // write to file
        fprintf(stderr, "signalAlign - writing expectations to file: %s\n", templateExpectationsFile);
        hmmContinuous_writeToFile(templateExpectationsFile, templateExpectations, sMtype);
        // get expectations for the complement
        StateMachine *sMc;
        Hmm *complementExpectations = NULL;
        if (twoD) {
            fprintf(stderr, "signalAlign - getting expectations for complement\n");
            sMc = buildStateMachine(complementModelFile, npRead->complementParams, sMtype, nHdpC);
            if (ESTIMATE_PARAMS) {
                signalUtils_estimateNanoporeParams(sMc, npRead, &npRead->complementParams, ASSIGNMENT_THRESHOLD,
                                                   signalUtils_complementOneDAssignmentsFromRead,
                                                   nanopore_adjustComplementEventsForDrift);
            }
            complementExpectations = hmmContinuous_getExpectationsHmm(sMc, p->threshold, 0.001, 0.001);
            getSignalExpectations(sMc, complementExpectations, cEventSequence, npRead->complementEventMap,
                                  pA->start2,
                                  R->getComplementTargetSequence(R),
                                  p, anchorPairs, degenerate);
            if (sMtype == threeStateHdp) {
                fprintf(stderr, "signalAlign - got %" PRId64 " complement HDP assignments\n",
                        hmmContinuous_howManyAssignments(complementExpectations));
            }
            // write to file
            fprintf(stderr, "signalAlign - writing expectations to file: %s\n", complementExpectationsFile);
            hmmContinuous_writeToFile(complementExpectationsFile, complementExpectations, sMtype);
        }
        // expectations-mode cleanup
        stateMachine_destruct(sMt);
        signalUtils_ReferenceSequenceDestruct(R);
        hmmContinuous_destruct(templateExpectations, sMtype);
        nanopore_nanoporeReadDestruct(npRead);
        sequence_destruct(tEventSequence);
        pairwiseAlignmentBandingParameters_destruct(p);
        destructPairwiseAlignment(pA);
        stList_destruct(anchorPairs);
        if (twoD) {
            stateMachine_destruct(sMc);
            sequence_destruct(cEventSequence);
            hmmContinuous_destruct(complementExpectations, sMtype);
        }
        return 0;
    } else {
        // Alignment Procedure //
        // Template alignment
        fprintf(stderr, "signalAlign - starting template alignment\n");
        // make template stateMachine
        StateMachine *sMt = buildStateMachine(templateModelFile, npRead->templateParams, sMtype, nHdpT);
        // re-estimate the nanoporeAdjustment parameters
        if (ESTIMATE_PARAMS) {
            signalUtils_estimateNanoporeParams(sMt, npRead, &npRead->templateParams, ASSIGNMENT_THRESHOLD,
                                               signalUtils_templateOneDAssignmentsFromRead,
                                               nanopore_adjustTemplateEventsForDrift);
        }
        if (sMtype == threeStateHdp) {
            stateMachine3_setModelToHdpExpectedValues(sMt, nHdpT);
        }
        stList *templateAlignedPairs = performSignalAlignment(sMt, tEventSequence,
                                                              (twoD ? npRead->templateEventMap : npRead->templateStrandEventMap),
                                                              pA->start2, R->getTemplateTargetSequence(R),
                                                              p, anchorPairs,
                                                              degenerate);
        double templatePosteriorScore = scoreByPosteriorProbabilityIgnoringGaps(templateAlignedPairs);
        // sort
        stList_sort(templateAlignedPairs, sortByXPlusYCoordinate2);  // Ensure the coordinates are increasing
        // write to file
        if (posteriorProbsFile != NULL) {
            outputAlignment(outFmt, posteriorProbsFile, readLabel, sMt, npRead->templateParams, npRead->templateEvents,
                            R->getTemplateTargetSequence(R), forward, pA->contig1, tCoordinateShift, rCoordinateShift_t,
                            templateAlignedPairs, template);
        }
        stList *complementAlignedPairs;
        double complementPosteriorScore = 0.0;
        StateMachine *sMc;
        if (twoD) {
            // Complement alignment
            fprintf(stderr, "signalAlign - starting complement alignment\n");
            sMc = buildStateMachine(complementModelFile, npRead->complementParams, sMtype, nHdpC);
            if (ESTIMATE_PARAMS) {
                signalUtils_estimateNanoporeParams(sMc, npRead, &npRead->complementParams, ASSIGNMENT_THRESHOLD,
                                                   signalUtils_complementOneDAssignmentsFromRead,
                                                   nanopore_adjustComplementEventsForDrift);
            }
            if (sMtype == threeStateHdp) {
                stateMachine3_setModelToHdpExpectedValues(sMc, nHdpC);
            }
            complementAlignedPairs = performSignalAlignment(sMc, cEventSequence,
                                                            npRead->complementEventMap, pA->start2,
                                                            R->getComplementTargetSequence(R),
                                                            p, anchorPairs, degenerate);
            complementPosteriorScore = scoreByPosteriorProbabilityIgnoringGaps(complementAlignedPairs);
            // sort
            stList_sort(complementAlignedPairs, sortByXPlusYCoordinate2);  // Ensure the coordinates are increasing
            // write to file
            if (posteriorProbsFile != NULL) {
                outputAlignment(outFmt, posteriorProbsFile, readLabel, sMc, npRead->complementParams,
                                npRead->complementEvents, R->getComplementTargetSequence(R), forward, pA->contig1,
                                cCoordinateShift, rCoordinateShift_c, complementAlignedPairs, complement);
            }
        }
        // one-line per-read summary on stdout
        fprintf(stdout, "%s %"PRId64"\t%"PRId64"(%f)\t", readLabel, stList_length(anchorPairs),
                stList_length(templateAlignedPairs), templatePosteriorScore);
        if (twoD) {
            fprintf(stdout, "%"PRId64"(%f)\n", stList_length(complementAlignedPairs), complementPosteriorScore);
        } else {
            fprintf(stdout, "\n");
        }
        // final alignment clean up
        destructPairwiseAlignment(pA);
        nanopore_nanoporeReadDestruct(npRead);
        signalUtils_ReferenceSequenceDestruct(R);
        stateMachine_destruct(sMt);
        sequence_destruct(tEventSequence);
        stList_destruct(templateAlignedPairs);
        if (twoD) {
            stateMachine_destruct(sMc);
            sequence_destruct(cEventSequence);
            stList_destruct(complementAlignedPairs);
        }
        fprintf(stderr, "signalAlign - SUCCESS: finished alignment of query %s, exiting\n", readLabel);
    }
    return 0;
}
|
intruder.c | /* =============================================================================
*
* intruder.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "decoder.h"
#include "detector.h"
#include "dictionary.h"
#include "packet.h"
#include "stream.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
#include "../lib/instrument_roi.h"
/* Command-line option tags.  Each value is the ASCII code of its option
 * letter, so the letter returned by getopt() can be used directly as an
 * index into global_params[]. */
enum param_types {
    PARAM_ATTACK = (unsigned char)'a',
    PARAM_LENGTH = (unsigned char)'l',
    PARAM_NUM    = (unsigned char)'n',
    PARAM_SEED   = (unsigned char)'s',
    PARAM_THREAD = (unsigned char)'t',
};
/* Default values for each option above. */
enum param_defaults {
    PARAM_DEFAULT_ATTACK = 10,
    PARAM_DEFAULT_LENGTH = 16,
    PARAM_DEFAULT_NUM    = 1 << 20,
    PARAM_DEFAULT_SEED   = 1,
    PARAM_DEFAULT_THREAD = 1,
};
/* Parameter table: one slot per possible ASCII option character.
 * Filled by global_param_init() and overridden by parseArgs(). */
long global_params[256];
#if 0
/* Designated-initializer form of the defaults; disabled (presumably for
 * compilers without C99 designated-initializer support — confirm) and
 * replicated at runtime by global_param_init(). */
= { /* 256 = ascii limit */
    [PARAM_ATTACK] = PARAM_DEFAULT_ATTACK,
    [PARAM_LENGTH] = PARAM_DEFAULT_LENGTH,
    [PARAM_NUM]    = PARAM_DEFAULT_NUM,
    [PARAM_SEED]   = PARAM_DEFAULT_SEED,
    [PARAM_THREAD] = PARAM_DEFAULT_THREAD,
};
#endif
/* Load the compile-time defaults into the global parameter table.
 * Must run before parseArgs() so unspecified options keep their defaults. */
void global_param_init()
{
    static const struct {
        unsigned char key;  /* ASCII option character, indexes global_params */
        long value;         /* default for that option */
    } kDefaults[] = {
        { PARAM_ATTACK, PARAM_DEFAULT_ATTACK },
        { PARAM_LENGTH, PARAM_DEFAULT_LENGTH },
        { PARAM_NUM,    PARAM_DEFAULT_NUM    },
        { PARAM_SEED,   PARAM_DEFAULT_SEED   },
        { PARAM_THREAD, PARAM_DEFAULT_THREAD },
    };
    size_t i;
    for (i = 0; i < sizeof(kDefaults) / sizeof(kDefaults[0]); i++) {
        global_params[kDefaults[i].key] = kDefaults[i].value;
    }
}
/* Argument bundle handed to every worker thread by thread_start(). */
typedef struct arg {
  /* input: */
    stream_t* streamPtr;      /* shared packet stream all threads pull from */
    decoder_t* decoderPtr;    /* shared flow-reassembly decoder */
  /* output: */
    vector_t** errorVectors;  /* one flow-id vector per thread (no contention) */
} arg_t;
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
    /* Print the option summary (with compile-time defaults) and terminate.
     * Called for any command-line error; never returns. */
    printf("Usage: %s [options]\n", appName);
    puts("\nOptions:                            (defaults)\n");
    printf("    a <UINT>   Percent [a]ttack     (%i)\n", PARAM_DEFAULT_ATTACK);
    printf("    l <UINT>   Max data [l]ength    (%i)\n", PARAM_DEFAULT_LENGTH);
    printf("    n <UINT>   [n]umber of flows    (%i)\n", PARAM_DEFAULT_NUM);
    printf("    s <UINT>   Random [s]eed        (%i)\n", PARAM_DEFAULT_SEED);
    printf("    t <UINT>   Number of [t]hreads  (%i)\n", PARAM_DEFAULT_THREAD);
    exit(1);
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
/* Parse the command line into global_params[].  Any unknown option,
 * missing option argument, or stray positional argument is counted in
 * opterr; if anything went wrong, print usage and exit(1). */
static void
parseArgs (long argc, char* const argv[])
{
    long opt;
    long i;
    opterr = 0;  /* suppress getopt's own diagnostics; we count errors ourselves */
    while ((opt = getopt(argc, argv, "a:l:n:s:t:")) != -1) {
        if (opt == 'a' || opt == 'l' || opt == 'n' ||
            opt == 's' || opt == 't')
        {
            /* Recognized option: the option letter itself is the table index. */
            global_params[(unsigned char)opt] = atol(optarg);
        } else {
            /* '?' (unknown option / missing argument) or anything unexpected. */
            opterr++;
        }
    }
    /* Positional arguments are not accepted by this benchmark. */
    for (i = optind; i < argc; i++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[i]);
        opterr++;
    }
    if (opterr) {
        displayUsage(argv[0]);
    }
}
/* =============================================================================
* processPackets
* =============================================================================
*/
/* Worker-thread body: repeatedly pull a packet fragment off the shared
 * stream, feed it to the shared decoder inside transactions, and scan any
 * fully reassembled flow with a thread-private detector.  Flow ids that
 * the detector flags are appended to this thread's private error vector. */
void
processPackets (void* argPtr)
{
    TM_THREAD_ENTER();
    long threadId = thread_getId();
    /* Unpack shared inputs and the per-thread output vector. */
    stream_t* streamPtr = ((arg_t*)argPtr)->streamPtr;
    decoder_t* decoderPtr = ((arg_t*)argPtr)->decoderPtr;
    vector_t** errorVectors = ((arg_t*)argPtr)->errorVectors;
    /* Detector is thread-private (P-allocated), so no synchronization needed. */
    detector_t* detectorPtr = PDETECTOR_ALLOC();
    assert(detectorPtr);
    PDETECTOR_ADDPREPROCESSOR(detectorPtr, &preprocessor_toLower);
    vector_t* errorVectorPtr = errorVectors[threadId];
    while (1) {
        char* bytes;
        /* Transaction 1: pop one packet fragment from the shared stream. */
        TM_BEGIN();
        bytes = TMSTREAM_GETPACKET(streamPtr);
        TM_END();
        if (!bytes) {
            break;  /* stream drained: this thread is done */
        }
        packet_t* packetPtr = (packet_t*)bytes;
        long flowId = packetPtr->flowId;
        int_error_t error;
        /* Transaction 2: hand the fragment to the shared reassembly decoder. */
        TM_BEGIN();
        error = TMDECODER_PROCESS(decoderPtr,
                                  bytes,
                                  (PACKET_HEADER_LENGTH + packetPtr->length));
        TM_END();
        if (error) {
            /*
             * Currently, stream_generate() does not create these errors.
             */
            assert(0);
            bool_t status = PVECTOR_PUSHBACK(errorVectorPtr, (void*)flowId);
            assert(status);
        }
        char* data;
        long decodedFlowId;
        /* Transaction 3: ask the decoder whether some flow is now complete. */
        TM_BEGIN();
        data = TMDECODER_GETCOMPLETE(decoderPtr, &decodedFlowId);
        TM_END();
        if (data) {
            /* Scan the reassembled flow outside any transaction. */
            int_error_t error = PDETECTOR_PROCESS(detectorPtr, data);
            P_FREE(data);
            if (error) {
                /* Detector flagged this flow as an attack: record its id. */
                bool_t status = PVECTOR_PUSHBACK(errorVectorPtr,
                                                 (void*)decodedFlowId);
                assert(status);
            }
        }
    }
    PDETECTOR_FREE(detectorPtr);
    TM_THREAD_EXIT();
}
/* =============================================================================
* main
* =============================================================================
*/
/* Benchmark driver: generate a packet stream with a known set of attack
 * flows, process it with numThread workers, then verify that exactly the
 * attack flows were detected. */
MAIN(argc, argv)
{
    /*
     * Initialization
     */
    global_param_init();
    parseArgs(argc, (char** const)argv);
    long numThread = global_params[PARAM_THREAD];
    SIM_GET_NUM_CPU(numThread);
    TM_STARTUP(numThread);
    P_MEMORY_STARTUP(numThread);
    thread_startup(numThread);
    /* NOTE(review): the TIMER_READ calls for these are commented out below,
     * so TIMER_DIFF_SECONDS() at the end operates on uninitialized timers and
     * the "Elapsed time" line is meaningless — presumably the ROI macros are
     * the intended measurement; confirm. */
    TIMER_T startTime, stopTime;
    long percentAttack = global_params[PARAM_ATTACK];
    long maxDataLength = global_params[PARAM_LENGTH];
    long numFlow = global_params[PARAM_NUM];
    long randomSeed = global_params[PARAM_SEED];
    printf("Percent attack  = %li\n", percentAttack);
    printf("Max data length = %li\n", maxDataLength);
    printf("Num flow        = %li\n", numFlow);
    printf("Random seed     = %li\n", randomSeed);
    dictionary_t* dictionaryPtr = dictionary_alloc();
    assert(dictionaryPtr);
    stream_t* streamPtr = stream_alloc(percentAttack);
    assert(streamPtr);
    /* Pre-generate the whole stream; returns how many flows carry an attack. */
    long numAttack = stream_generate(streamPtr,
                                     dictionaryPtr,
                                     numFlow,
                                     randomSeed,
                                     maxDataLength);
    printf("Num attack      = %li\n", numAttack);
    decoder_t* decoderPtr = decoder_alloc();
    assert(decoderPtr);
    /* One error vector per thread so workers never contend on output. */
    vector_t** errorVectors = (vector_t**)SEQ_MALLOC(numThread * sizeof(vector_t*));
    assert(errorVectors);
    long i;
    for (i = 0; i < numThread; i++) {
        /* Capacity numFlow: worst case, every flow gets flagged. */
        vector_t* errorVectorPtr = vector_alloc(numFlow);
        assert(errorVectorPtr);
        errorVectors[i] = errorVectorPtr;
    }
    arg_t arg;
    arg.streamPtr = streamPtr;
    arg.decoderPtr = decoderPtr;
    arg.errorVectors = errorVectors;
    /*
     * Run transactions
     */
    // NB: Since ASF/PTLSim "REAL" is native execution, and since we are using
    //     wallclock time, we want to be sure we read time inside the
    //     simulator, or else we report native cycles spent on the benchmark
    //     instead of simulator cycles.
    //GOTO_SIM();
    //TIMER_T startTime;
    //TIMER_READ(startTime);
    BEGIN_ROI;
#ifdef OTM
#pragma omp parallel
    {
        processPackets((void*)&arg);
    }
#else
    thread_start(processPackets, (void*)&arg);
#endif
    END_ROI;
    //TIMER_T stopTime;
    //TIMER_READ(stopTime);
    // NB: As above, timer reads must be done inside of the simulated region
    //     for PTLSim/ASF
    //GOTO_REAL();
    printf("Elapsed time    = %f seconds\n", TIMER_DIFF_SECONDS(startTime, stopTime));
    /*
     * Check solution
     */
    long numFound = 0;
    for (i = 0; i < numThread; i++) {
        vector_t* errorVectorPtr = errorVectors[i];
        long e;
        long numError = vector_getSize(errorVectorPtr);
        numFound += numError;
        for (e = 0; e < numError; e++) {
            long flowId = (long)vector_at(errorVectorPtr, e);
            /* Every flagged flow must really be one of the generated attacks. */
            bool_t status = stream_isAttack(streamPtr, flowId);
            assert(status);
        }
    }
    printf("Num found       = %li\n", numFound);
    /* ...and, conversely, every generated attack must have been found. */
    assert(numFound == numAttack);
    /*
     * Clean up
     */
    for (i = 0; i < numThread; i++) {
        vector_free(errorVectors[i]);
    }
    SEQ_FREE(errorVectors);
    decoder_free(decoderPtr);
    stream_free(streamPtr);
    dictionary_free(dictionaryPtr);
    TM_SHUTDOWN();
    P_MEMORY_SHUTDOWN();
    thread_shutdown();
    MAIN_RETURN(0);
}
/* =============================================================================
*
* End of intruder.c
*
* =============================================================================
*/
|
GB_unop__identity_uint64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_int64)
// op(A') function: GB (_unop_tran__identity_uint64_int64)
// C type: uint64_t
// A type: int64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entry by entry with an int64_t -> uint64_t
// typecast: Cx [p] = (uint64_t) Ax [p] for every entry present in A.
GrB_Info GB (_unop_apply__identity_uint64_int64)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // entry present in the bitmap: cast and copy
                Cx [p] = (uint64_t) Ax [p] ;
            }
        }
    }
    else
    {
        // dense/full case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (uint64_t) Ax [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose A while casting int64_t values to uint64_t.
GrB_Info GB (_unop_tran__identity_uint64_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // one workspace per task/bucket
    const int64_t *restrict A_slice,    // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is shared by all unary ops; the GB_* macros
    // defined above specialize it for this type combination.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_uint16_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_bool)
// op(A') function: GB (_unop_tran__identity_uint16_bool)
// C type: uint16_t
// A type: bool
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entry by entry with a bool -> uint16_t
// typecast: Cx [p] = (uint16_t) Ax [p] for every entry present in A.
GrB_Info GB (_unop_apply__identity_uint16_bool)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // entry present in the bitmap: cast and copy
                Cx [p] = (uint16_t) Ax [p] ;
            }
        }
    }
    else
    {
        // dense/full case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (uint16_t) Ax [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose A while casting bool values to uint16_t.
GrB_Info GB (_unop_tran__identity_uint16_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // one workspace per task/bucket
    const int64_t *restrict A_slice,    // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is shared by all unary ops; the GB_* macros
    // defined above specialize it for this type combination.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp.h | #ifndef PARLAY_INTERNAL_SCHEDULER_PLUGINS_OMP_H_
#define PARLAY_INTERNAL_SCHEDULER_PLUGINS_OMP_H_
#if defined(PARLAY_OPENMP)
#include <omp.h>
namespace parlay {
// IWYU pragma: private, include "../../parallel.h"
// Number of OpenMP threads the runtime may use.
inline size_t num_workers() { return omp_get_max_threads(); }

// Index of the calling OpenMP thread.
inline size_t worker_id() { return omp_get_thread_num(); }

// Parallel loop over [start, end).  The granularity and conservative
// arguments of the generic parlay interface are accepted but unused by
// the OpenMP plugin.
template <class F>
inline void parallel_for(size_t start, size_t end, F f, long, bool) {
#pragma omp parallel for
  for (size_t i = start; i < end; ++i) {
    f(i);
  }
}
// Tracks whether a par_do tasking region is already active.  Declared
// `inline` (C++17): this is a header, and the original plain `bool`
// definition would be duplicated in every translation unit that includes
// it, violating the one-definition rule (multiple-definition link error).
inline bool in_par_do = false;

// Run `left` and `right`, potentially in parallel, using OpenMP tasks.
// At the top level a parallel region is opened and the two calls are
// spawned as tasks from a single thread; nested calls reuse the existing
// region.  The trailing bool (conservative flag) is part of the generic
// parlay interface and is ignored here.
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool) {
  if (!in_par_do) {
    in_par_do = true;  // at top level start up tasking
#pragma omp parallel
    {
#pragma omp single
      {
#pragma omp task
        {
          left();
        }
#pragma omp task
        {
          right();
        }
      }
    }
    // NOTE(review): this taskwait binds outside the parallel region, where
    // it is a no-op — the implicit barrier at the end of the region above
    // already guarantees both tasks have completed.  Kept for fidelity.
#pragma omp taskwait
    in_par_do = false;
  } else {  // already started: spawn tasks inside the existing region
#pragma omp task
    left();
#pragma omp task
    right();
#pragma omp taskwait
  }
}
} // namespace parlay
#endif
#endif // PARLAY_INTERNAL_SCHEDULER_PLUGINS_OMP_H_
|
GB_unop__abs_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_int16_int16
// op(A') function: GB_unop_tran__abs_int16_int16
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = GB_IABS (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = |Ax [p]| (int16_t absolute value via GB_IABS) for every entry
// present in A.
GrB_Info GB_unop_apply__abs_int16_int16
(
    int16_t *Cx,                    // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // named temporary: GB_IABS is a macro and may evaluate
                // its argument more than once
                int16_t t = Ax [p] ;
                Cx [p] = GB_IABS (t) ;
            }
        }
    }
    else
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a single memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        // dense/full case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t t = Ax [p] ;
            Cx [p] = GB_IABS (t) ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs (A'): transpose A while applying the int16_t absolute value.
GrB_Info GB_unop_tran__abs_int16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // one workspace per task/bucket
    const int64_t *GB_RESTRICT A_slice, // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is shared by all unary ops; the GB_* macros
    // defined above specialize it for this operator/type combination.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
biotsavart.h | #pragma once
#include "EigenInclude.h"
#include "types.h"
#include "mapping.h"
#include "debugutils.h"
#include <limits>
#include <math.h>
#include <cmath>
#define VORTEX_RADIUS_DEF 1e-6
// #define VORTEX_RADIUS_SQ 1e-4
#define Nvert 4
// Declaration for parallel computing
#pragma omp declare reduction (sum_Vector3 : UVLM::Types::Vector3 : omp_out += omp_in) initializer(omp_priv = UVLM::Types::zeroVector3())
namespace UVLM
{
namespace BiotSavart
{
// DECLARATIONS
template <typename t_zeta,
typename t_gamma,
typename t_tsurface,
typename t_uout,
typename t_normals>
void multisurface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_tsurface& target_surface,
t_uout& uout,
// const UVLM::Types::IntPair& dimensions,
const bool& image_method = false,
const t_normals& normal = NULL,
// const bool& horseshoe = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_tsurface,
typename t_uout,
typename t_normals>
void multisurface_steady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_tsurface& target_surface,
const bool& horseshoe,
t_uout& uout,
const bool& image_method = false,
const t_normals& normal = NULL,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_tsurface,
typename t_uout,
typename t_normals>
void multisurface_unsteady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_tsurface& target_surface,
t_uout& uout,
const bool& image_method,
const t_normals& normal,
const int& n_rows = -1,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_zeta,
typename t_gamma,
typename t_ttriad,
typename t_uout>
void surface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_ttriad& target_triad,
t_uout& uout,
unsigned int Mstart = 0,
unsigned int Nstart = 0,
unsigned int Mend = -1,
unsigned int Nend = -1,
const bool& image_method = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_ttriad,
typename t_uout>
void surface_with_steady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_ttriad& target_triad,
const bool& horseshoe,
t_uout& uout,
const bool& image_method = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_ttriad,
typename t_uout>
void surface_with_unsteady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_ttriad& target_triad,
t_uout& uout,
const bool& image_method,
const int& n_rows = -1, // default val = -1
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_triad,
typename t_block>
//typename t_uind>
UVLM::Types::Vector3 vortex_ring
(
const t_triad& target_triad,
const t_block& x,
const t_block& y,
const t_block& z,
const UVLM::Types::Real& gamma_star,
// t_uind& uind,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_triad,
typename t_block>
void horseshoe
(
const t_triad& target_triad,
const t_block& x,
const t_block& y,
const t_block& z,
const UVLM::Types::Real& gamma,
UVLM::Types::Vector3& uind,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_triad>
//typename t_uind>
UVLM::Types::Vector3 segment
(
const t_triad& target_triad,
const UVLM::Types::Vector3& v1,
const UVLM::Types::Vector3& v2,
const UVLM::Types::Real& gamma,
// t_uind& uind,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_uout>
void total_induced_velocity_on_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
t_uout& uout,
const bool& image_method = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_zeta,
typename t_gamma,
typename t_zeta_col,
typename t_u_ind>
void whole_surface_on_surface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_zeta_col& zeta_col,
t_u_ind& u_ind,
const bool image_method = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
template <typename t_zeta,
typename t_gamma,
typename t_ttriad>
// typename t_uout>
UVLM::Types::Vector3 whole_surface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_ttriad& target_triad,
// t_uout& uout,
const bool& image_method,
const UVLM::Types::Real& vortex_radius,
unsigned int Mstart = 0,
unsigned int Nstart = 0
);
template <typename t_ttriad,
typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star>
UVLM::Types::Vector3 total_induced_velocity_on_point
(
const t_ttriad& target_triad,
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const bool& image_method,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
}
}
namespace UVLMlin{
void biot_panel_map( map_RowVec3& velP,
const map_RowVec3 zetaP,
const map_Mat4by3 ZetaPanel,
const double gamma,
double vortex_radius);
void der_biot_panel(Matrix3d& DerP,
Matrix3d DerVertices[Nvert],
const RowVector3d zetaP,
const Matrix4by3d ZetaPanel,
const double gamma);
void der_biot_panel_map( map_Mat3by3& DerP,
Vec_map_Mat3by3& DerVertices,
const map_RowVec3 zetaP,
const map_Mat4by3 ZetaPanel,
const double gamma,
double vortex_radius);
void der_runit( Matrix3d& Der,
const RowVector3d& rv,
double rinv,
double minus_rinv3);
Matrix3d Dvcross_by_skew3d(const Matrix3d& Dvcross,
const RowVector3d& rv);
void dvinddzeta(map_Mat3by3 DerC,
map_Mat DerV,
const map_RowVec3 zetaC,
Vec_map_Mat ZetaIn,
map_Mat GammaIn,
int& M_in,
int& N_in,
int& Kzeta_in,
bool& IsBound,
int& M_in_bound, // M of bound surf associated
int& Kzeta_in_bound,
double vortex_radius
);
void aic3( map_Mat AIC3,
const map_RowVec3 zetaC,
Vec_map_Mat ZetaIn,
int& M_in,
int& N_in,
double vortex_radius);
void ind_vel(map_RowVec3 velC,
const map_RowVec3 zetaC,
Vec_map_Mat ZetaIn,
map_Mat GammaIn,
int& M_in,
int& N_in,
double vortex_radius);
}
// SOURCE CODE
// Induced velocity at point rp due to a straight vortex segment running
// from v1 to v2 with circulation gamma (Biot-Savart law).  Returns the
// zero vector when rp lies within vortex_radius of either endpoint, or
// when |r1 x r2|^2 falls below vortex_radius^2 (core cutoff — note this
// compares a squared area-like quantity against a squared length;
// presumably an intentional regularization heuristic, confirm).
template <typename t_triad>
inline UVLM::Types::Vector3 UVLM::BiotSavart::segment
(
    const t_triad& rp,
    const UVLM::Types::Vector3& v1,
    const UVLM::Types::Vector3& v2,
    const UVLM::Types::Real& gamma,
    const UVLM::Types::Real& vortex_radius
)
{
    UVLM::Types::Vector3 uind;
    // r0 = segment vector, r1/r2 = vectors from the endpoints to rp
    UVLM::Types::Real r0[3], r0_mod;
    UVLM::Types::Real r1[3], r1_mod;
    UVLM::Types::Real r2[3], r2_mod;
    r0_mod = 0.0;
    r1_mod = 0.0;
    r2_mod = 0.0;
    // hopefully this loop is unrolled
    for (uint i=0; i<3; ++i)
    {
        r0[i] = v2(i) - v1(i);
        r1[i] = rp(i) - v1(i);
        r2[i] = rp(i) - v2(i);
        // accumulate squared norms; square roots taken once below
        r0_mod += r0[i]*r0[i];
        r1_mod += r1[i]*r1[i];
        r2_mod += r2[i]*r2[i];
    }
    r0_mod = sqrt(r0_mod);
    r1_mod = sqrt(r1_mod);
    r2_mod = sqrt(r2_mod);
    // target too close to an endpoint: inside the vortex core, no velocity
    if ((r1_mod < vortex_radius) || (r2_mod < vortex_radius)){
        uind(0) = 0.0;
        uind(1) = 0.0;
        uind(2) = 0.0;
        return uind;
    }else{
        UVLM::Types::Real r1_cross_r2[3];
        r1_cross_r2[0] = r1[1]*r2[2] - r1[2]*r2[1];
        r1_cross_r2[1] = r1[2]*r2[0] - r1[0]*r2[2];
        r1_cross_r2[2] = r1[0]*r2[1] - r1[1]*r2[0];
        UVLM::Types::Real r1_cross_r2_mod_sq;
        r1_cross_r2_mod_sq = r1_cross_r2[0]*r1_cross_r2[0] +
                             r1_cross_r2[1]*r1_cross_r2[1] +
                             r1_cross_r2[2]*r1_cross_r2[2];
        // target (numerically) on the segment axis: no induced velocity
        if (r1_cross_r2_mod_sq < vortex_radius*vortex_radius){
            uind(0) = 0.0;
            uind(1) = 0.0;
            uind(2) = 0.0;
            return uind;
        }else{
            UVLM::Types::Real r0_dot_r1;
            r0_dot_r1 = r0[0]*r1[0] +
                        r0[1]*r1[1] +
                        r0[2]*r1[2];
            UVLM::Types::Real r0_dot_r2;
            r0_dot_r2 = r0[0]*r2[0] +
                        r0[1]*r2[1] +
                        r0[2]*r2[2];
            // K = (gamma / (4 pi |r1 x r2|^2)) * (r0.r1/|r1| - r0.r2/|r2|),
            // the scalar factor of the finite-segment Biot-Savart formula
            UVLM::Types::Real K;
            K = (gamma*UVLM::Constants::INV_PI4/(r1_cross_r2_mod_sq))*
                (r0_dot_r1/r1_mod - r0_dot_r2/r2_mod);
            // velocity is directed along r1 x r2
            uind(0) = K*r1_cross_r2[0];
            uind(1) = K*r1_cross_r2[1];
            uind(2) = K*r1_cross_r2[2];
            return uind;
        }
    }
}
// Induced velocity of a horseshoe vortex of circulation gamma_star at
// target_triad, accumulated into uind.  Corner coordinates come from the
// x/y/z blocks via UVLM::Mapping::vortex_indices.  Edges 0-1 and 2-3 are
// treated as semi-infinite trailing legs; edge 3-0 is a finite segment.
template <typename t_triad,
          typename t_block>
void UVLM::BiotSavart::horseshoe
(
    const t_triad& target_triad,
    const t_block& x,
    const t_block& y,
    const t_block& z,
    const UVLM::Types::Real& gamma_star,
    UVLM::Types::Vector3& uind,
    const UVLM::Types::Real& vortex_radius
)
{
    // three segments.
    //
    //     0___________3
    //     |           |
    //     |           |
    //     |           |
    //     |           |
    //     |           |
    //     |           |
    //     |           |
    //    1|           |2
    //
    // segments 0-1 and 2-3 are represented as length 1, but they are effectively
    // infinite
    // segment 3-0 is considered as a normal one
    UVLM::Types::Vector3 v1;
    UVLM::Types::Vector3 v2;
    // segment 3-0: finite bound segment, handled by the generic routine
    v1 << x(UVLM::Mapping::vortex_indices(3, 0),
            UVLM::Mapping::vortex_indices(3, 1)),
          y(UVLM::Mapping::vortex_indices(3, 0),
            UVLM::Mapping::vortex_indices(3, 1)),
          z(UVLM::Mapping::vortex_indices(3, 0),
            UVLM::Mapping::vortex_indices(3, 1));
    v2 << x(UVLM::Mapping::vortex_indices(0, 0),
            UVLM::Mapping::vortex_indices(0, 1)),
          y(UVLM::Mapping::vortex_indices(0, 0),
            UVLM::Mapping::vortex_indices(0, 1)),
          z(UVLM::Mapping::vortex_indices(0, 0),
            UVLM::Mapping::vortex_indices(0, 1));
    uind += UVLM::BiotSavart::segment(target_triad,
                                      v1,
                                      v2,
                                      gamma_star,
                                      vortex_radius);
                                      // uind);
    // segment 0-1: semi-infinite leg trailing from corner 0
    v1 << x(UVLM::Mapping::vortex_indices(0, 0),
            UVLM::Mapping::vortex_indices(0, 1)),
          y(UVLM::Mapping::vortex_indices(0, 0),
            UVLM::Mapping::vortex_indices(0, 1)),
          z(UVLM::Mapping::vortex_indices(0, 0),
            UVLM::Mapping::vortex_indices(0, 1));
    v2 << x(UVLM::Mapping::vortex_indices(1, 0),
            UVLM::Mapping::vortex_indices(1, 1)),
          y(UVLM::Mapping::vortex_indices(1, 0),
            UVLM::Mapping::vortex_indices(1, 1)),
          z(UVLM::Mapping::vortex_indices(1, 0),
            UVLM::Mapping::vortex_indices(1, 1));
    // here the segment will be considered as 1----->2 and the
    // point 2 is in infinity, so beta2=pi
    UVLM::Types::Vector3 r0 = v2 - v1;
    UVLM::Types::Vector3 r1 = target_triad - v1;
    UVLM::Types::Vector3 r2 = target_triad - v2;
    UVLM::Types::Vector3 r1_cross_r2 = r1.cross(r2);
    // perpendicular distance from the target to the segment line
    UVLM::Types::Real dist = (r1_cross_r2).norm()/r0.norm();
    // beta1/beta2 hold the COSINES of the view angles, not the angles
    UVLM::Types::Real beta1;
    UVLM::Types::Real beta2;
    UVLM::Types::Vector3 u_radial;
    // skip the contribution inside the vortex core (same cutoffs as segment())
    if (!((r1.norm() < vortex_radius) ||
         (r2.norm() < vortex_radius) ||
         (r1_cross_r2.norm() < vortex_radius)))
    {
        beta1 = r0.dot(r1)/(r0.norm()*r1.norm());
        beta2 = UVLM::Constants::PI;
        u_radial = (r1_cross_r2)/(r1_cross_r2).norm();
        // semi-infinite far end: cos(beta2) = -1, so factor is (cos(beta1) + 1)
        uind += gamma_star/(UVLM::Constants::PI4*dist)*(beta1 + 1.0)*
                u_radial;
    }
    // segment 2-3: semi-infinite leg arriving at corner 3
    v1 << x(UVLM::Mapping::vortex_indices(2, 0),
            UVLM::Mapping::vortex_indices(2, 1)),
          y(UVLM::Mapping::vortex_indices(2, 0),
            UVLM::Mapping::vortex_indices(2, 1)),
          z(UVLM::Mapping::vortex_indices(2, 0),
            UVLM::Mapping::vortex_indices(2, 1));
    v2 << x(UVLM::Mapping::vortex_indices(3, 0),
            UVLM::Mapping::vortex_indices(3, 1)),
          y(UVLM::Mapping::vortex_indices(3, 0),
            UVLM::Mapping::vortex_indices(3, 1)),
          z(UVLM::Mapping::vortex_indices(3, 0),
            UVLM::Mapping::vortex_indices(3, 1));
    // here the segment will be considered as 1----->2 and the
    // point 1 is in infinity, so beta1=0
    r0 = v2 - v1;
    r1 = target_triad - v1;
    r2 = target_triad - v2;
    r1_cross_r2 = r1.cross(r2);
    dist = (r1_cross_r2).norm()/r0.norm();
    if (!((r1.norm() < vortex_radius) ||
         (r2.norm() < vortex_radius) ||
         (r1_cross_r2.norm() < vortex_radius)))
    {
        beta2 = r0.dot(r2)/(r0.norm()*r2.norm());
        dist = (r1.cross(r2)).norm()/r0.norm();
        u_radial = (r1_cross_r2)/(r1_cross_r2).norm();
        // semi-infinite near end: cos(beta1) = 1, so factor is (1 - cos(beta2))
        uind += gamma_star/(UVLM::Constants::PI4*dist)*(1.0 - beta2)*
                u_radial;
    }
}
template <typename t_triad,
          typename t_block>
// Velocity induced at target_triad by a closed quadrilateral vortex ring
// of circulation gamma, built from the four segments joining the panel
// corners addressed through UVLM::Mapping::vortex_indices.
UVLM::Types::Vector3 UVLM::BiotSavart::vortex_ring
(
    const t_triad& target_triad,
    const t_block& x,
    const t_block& y,
    const t_block& z,
    const UVLM::Types::Real& gamma,
    const UVLM::Types::Real& vortex_radius
)
{
    UVLM::Types::Vector3 induced_vel;
    induced_vel.setZero();

    // A ring with negligible circulation induces nothing: skip the work.
    if (std::abs(gamma) < UVLM::Constants::EPSILON)
    {
        return induced_vel;
    }

    // Fetch ring corner `vert` (0..3) as a 3D point.
    auto corner = [&x, &y, &z](unsigned int vert) -> UVLM::Types::Vector3
    {
        const auto row = UVLM::Mapping::vortex_indices(vert, 0);
        const auto col = UVLM::Mapping::vortex_indices(vert, 1);
        UVLM::Types::Vector3 point;
        point << x(row, col), y(row, col), z(row, col);
        return point;
    };

    // Walk the ring 0->1->2->3->0, accumulating each segment's contribution.
    const unsigned int n_segment = 4;
    for (unsigned int seg = 0; seg < n_segment; ++seg)
    {
        induced_vel += UVLM::BiotSavart::segment(target_triad,
                                                 corner(seg),
                                                 corner((seg + 1) % n_segment),
                                                 gamma,
                                                 vortex_radius);
    }
    return induced_vel;
}
template <typename t_zeta,
typename t_gamma,
typename t_ttriad,
typename t_uout>
// Influence of every panel of one lattice (zeta, gamma) on a single point.
// uout[i_dim](i, j) accumulates the i_dim velocity component induced at
// target_triad by panel (i, j). Each vortex segment is evaluated once with
// unit circulation; the per-panel influence is then assembled from the four
// surrounding segments so that edges shared by two panels are not recomputed.
// NOTE(review): image_method is accepted but never used in this body.
// NOTE(review): the scratch grids below are sized (Mend-Mstart) x (Nend-Nstart)
// (+1 along the closing edge) but indexed with absolute (i, j) starting at
// Mstart/Nstart; this only lines up when Mstart == Nstart == 0, which is how
// every caller in this file uses it -- confirm before calling with offsets.
void UVLM::BiotSavart::surface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_ttriad& target_triad,
t_uout& uout,
unsigned int Mstart,
unsigned int Nstart,
unsigned int Mend,
unsigned int Nend,
const bool& image_method,
const UVLM::Types::Real& vortex_radius
)
{
// Unit-circulation influence of every spanwise / chordwise segment.
UVLM::Types::VecVecMatrixX span_seg_uout;
UVLM::Types::VecVecMatrixX chord_seg_uout;
UVLM::Types::allocate_VecVecMat(span_seg_uout, 1, 3, (Mend-Mstart)+1, (Nend-Nstart));
UVLM::Types::allocate_VecVecMat(chord_seg_uout, 1, 3, (Mend-Mstart), (Nend-Nstart)+1);
UVLM::Types::Vector3 v1;
UVLM::Types::Vector3 v2;
UVLM::Types::Vector3 temp_uout;
for (uint i=Mstart; i<Mend; ++i)
{
for (uint j=Nstart; j<Nend; ++j)
{
// Spanwise vortices
v1 << zeta[0](i, j),
zeta[1](i, j),
zeta[2](i, j);
v2 << zeta[0](i, j+1),
zeta[1](i, j+1),
zeta[2](i, j+1);
temp_uout = UVLM::BiotSavart::segment(target_triad,
v1,
v2,
1.0,
vortex_radius);
span_seg_uout[0][0](i,j) = temp_uout(0);
span_seg_uout[0][1](i,j) = temp_uout(1);
span_seg_uout[0][2](i,j) = temp_uout(2);
// Streamwise/chordwise vortices
v2 << zeta[0](i+1, j),
zeta[1](i+1, j),
zeta[2](i+1, j);
temp_uout = UVLM::BiotSavart::segment(target_triad,
v1,
v2,
1.0,
vortex_radius);
chord_seg_uout[0][0](i,j) = temp_uout(0);
chord_seg_uout[0][1](i,j) = temp_uout(1);
chord_seg_uout[0][2](i,j) = temp_uout(2);
}
}
// Influence of the last spanwise vortex (closing edge at row Mend)
for (uint j=Nstart; j<Nend; j++)
{
v1 << zeta[0](Mend, j),
zeta[1](Mend, j),
zeta[2](Mend, j);
v2 << zeta[0](Mend, j+1),
zeta[1](Mend, j+1),
zeta[2](Mend, j+1);
temp_uout = UVLM::BiotSavart::segment(target_triad,
v1,
v2,
1.0,
vortex_radius);
span_seg_uout[0][0](Mend,j) = temp_uout(0);
span_seg_uout[0][1](Mend,j) = temp_uout(1);
span_seg_uout[0][2](Mend,j) = temp_uout(2);
}
// Influence of the last chordwise vortex (closing edge at column Nend)
for (uint i=Mstart; i<Mend; i++)
{
v1 << zeta[0](i, Nend),
zeta[1](i, Nend),
zeta[2](i, Nend);
v2 << zeta[0](i+1, Nend),
zeta[1](i+1, Nend),
zeta[2](i+1, Nend);
temp_uout = UVLM::BiotSavart::segment(target_triad,
v1,
v2,
1.0,
vortex_radius);
chord_seg_uout[0][0](i,Nend) = temp_uout(0);
chord_seg_uout[0][1](i,Nend) = temp_uout(1);
chord_seg_uout[0][2](i,Nend) = temp_uout(2);
}
// Transfer influence from segments to vortices.
// Panel (i, j) is bounded by the spanwise segments at rows i and i+1 and
// the chordwise segments at columns j and j+1; the alternating signs make
// the four unit segments form a closed ring of circulation gamma(i, j)
// traversed consistently.
for (uint i=Mstart; i<Mend; i++)
{
for (uint j=Nstart; j<Nend; j++)
{
for (uint i_dim=0; i_dim<UVLM::Constants::NDIM; ++i_dim)
{
uout[i_dim](i,j) -= span_seg_uout[0][i_dim](i,j)*gamma(i,j);
uout[i_dim](i,j) += span_seg_uout[0][i_dim](i+1,j)*gamma(i,j);
uout[i_dim](i,j) += chord_seg_uout[0][i_dim](i,j)*gamma(i,j);
uout[i_dim](i,j) -= chord_seg_uout[0][i_dim](i,j+1)*gamma(i,j);
}
// std::cout << i << " " << j << " "<< span_seg_uout[0][0](i,j) << " "<< span_seg_uout[0][1](i,j) << " "<< span_seg_uout[0][2](i,j) << std::endl;
// std::cout << i << " " << j << " "<< chord_seg_uout[0][0](i,j) << " "<< chord_seg_uout[0][1](i,j) << " "<< chord_seg_uout[0][2](i,j) << std::endl;
// std::cout << i << " " << j << " "<< uout[0](i,j) << " "<< uout[1](i,j) << " "<< uout[2](i,j) << std::endl;
}
}
}
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_ttriad,
typename t_uout>
// Influence of a bound lattice plus its steady wake on one point.
// In a steady wake the wake circulation equals the trailing-edge panel
// circulation, so the whole wake contribution is attributed to the last
// chordwise row of bound panels (i = Mend - 1) in uout.
// horseshoe == true: the wake is modelled as a single horseshoe vortex per
// span station; otherwise as a stack of vortex rings over all wake rows.
void UVLM::BiotSavart::surface_with_steady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_ttriad& target_triad,
const bool& horseshoe,
t_uout& uout,
const bool& image_method,
const UVLM::Types::Real& vortex_radius
)
{
// Bound-surface contribution over the full panel grid.
const uint Mstart = 0;
const uint Nstart = 0;
const uint Mend = gamma.rows();
const uint Nend = gamma.cols();
UVLM::BiotSavart::surface(zeta,
gamma,
target_triad,
uout,
Mstart,
Nstart,
Mend,
Nend,
image_method,
vortex_radius);
// Wake contribution, credited to the trailing-edge panel row.
const uint i0 = 0;
const uint i = Mend - 1;
if (horseshoe)
{
UVLM::Types::Vector3 temp_uout;
for (unsigned int j=Nstart; j<Nend; ++j)
{
temp_uout.setZero();
// One semi-infinite horseshoe per span station, fed by the first wake row.
UVLM::BiotSavart::horseshoe(target_triad,
zeta_star[0].template block<2,2>(i0,j),
zeta_star[1].template block<2,2>(i0,j),
zeta_star[2].template block<2,2>(i0,j),
gamma_star(i0,j),
temp_uout,
vortex_radius);
uout[0](i, j) += temp_uout(0);
uout[1](i, j) += temp_uout(1);
uout[2](i, j) += temp_uout(2);
}
} else
{
// Discretised wake: sum the vortex rings of every wake row.
const uint mstar = gamma_star.rows();
UVLM::Types::Vector3 temp_uout;
for (unsigned int j=Nstart; j<Nend; ++j)
{
temp_uout.setZero();
// #pragma omp parallel for collapse(1) reduction(sum_Vector3: temp_uout)
for (uint i_star=0; i_star<mstar; ++i_star)
{
temp_uout += UVLM::BiotSavart::vortex_ring(target_triad,
zeta_star[0].template block<2,2>(i_star, j),
zeta_star[1].template block<2,2>(i_star, j),
zeta_star[2].template block<2,2>(i_star, j),
gamma_star(i_star, j),
vortex_radius);
}
uout[0](i, j) += temp_uout(0);
uout[1](i, j) += temp_uout(1);
uout[2](i, j) += temp_uout(2);
}
}
}
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_ttriad,
typename t_uout>
// Influence of a bound lattice plus its unsteady (discretised) wake on one
// point. As in the steady case, the wake contribution is credited to the
// trailing-edge panel row (i = Mend - 1) of uout.
// n_rows limits how many wake rows are included; -1 means all of them.
void UVLM::BiotSavart::surface_with_unsteady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_ttriad& target_triad,
t_uout& uout,
const bool& image_method,
const int& n_rows,
const UVLM::Types::Real& vortex_radius
)
{
const uint Mstart = 0;
const uint Nstart = 0;
const uint Mend = gamma.rows();
const uint Nend = gamma.cols();
// Surface contribution
UVLM::BiotSavart::surface(zeta,
gamma,
target_triad,
uout,
Mstart,
Nstart,
Mend,
Nend,
image_method,
vortex_radius);
// wake contribution
// n_rows controls the number of panels that are included
// in the final result. Usually for unsteady wake, the value
// will be 1 when computing AIC coeffs.
// unless if gamma_star is a dummy one, just a row with ones.
const uint mstar = (n_rows == -1) ? gamma_star.rows():n_rows;
const uint i = Mend - 1;
UVLM::Types::Vector3 temp_uout;
for (uint j=Nstart; j<Nend; ++j)
{
temp_uout.setZero();
// #pragma omp parallel for collapse(1) reduction(sum_Vector3: temp_uout)
// Sum the vortex rings of the first mstar wake rows at this span station.
for (uint i_star=0; i_star<mstar; ++i_star)
{
temp_uout += UVLM::BiotSavart::vortex_ring(target_triad,
zeta_star[0].template block<2,2>(i_star, j),
zeta_star[1].template block<2,2>(i_star, j),
zeta_star[2].template block<2,2>(i_star, j),
gamma_star(i_star, j),
vortex_radius);
}
uout[0](i, j) += temp_uout(0);
uout[1](i, j) += temp_uout(1);
uout[2](i, j) += temp_uout(2);
}
}
template <typename t_zeta,
          typename t_gamma,
          typename t_tsurface,
          typename t_uout,
          typename t_normals>
// Assembles the normal-projected AIC block of one source lattice
// (zeta, gamma) on the collocation points of target_surface:
//   uout(collocation, panel) += u_induced(panel -> collocation) . normal.
// Rows of uout index collocation points (row-major over the target grid);
// columns index source panels (row-major over the gamma grid).
void UVLM::BiotSavart::multisurface
(
    const t_zeta& zeta,
    const t_gamma& gamma,
    const t_tsurface& target_surface,
    t_uout& uout,
    const bool& image_method,
    const t_normals& normal,
    const UVLM::Types::Real& vortex_radius
)
{
    const unsigned int rows_collocation = target_surface[0].rows();
    const unsigned int cols_collocation = target_surface[0].cols();
    const unsigned int surf_rows = gamma.rows();
    const unsigned int surf_cols = gamma.cols();
    #pragma omp parallel for collapse(2)
    for (unsigned int i_col=0; i_col<rows_collocation; ++i_col)
    {
        for (unsigned int j_col=0; j_col<cols_collocation; ++j_col)
        {
            // Thread-local scratch: per-panel influence on this point.
            UVLM::Types::Vector3 target_triad;
            UVLM::Types::VecMatrixX temp_uout;
            UVLM::Types::allocate_VecMat(temp_uout, zeta, -1);
            UVLM::Types::initialise_VecMat(temp_uout, 0.0);
            const int collocation_counter = j_col + i_col*cols_collocation;
            target_triad << target_surface[0](i_col, j_col),
                            target_surface[1](i_col, j_col),
                            target_surface[2](i_col, j_col);
            // BUG FIX: image_method was previously omitted from this call,
            // so vortex_radius was implicitly converted to bool and bound to
            // the image_method parameter, and the intended vortex radius
            // never reached the kernel (cf. the correct call in
            // multisurface_unsteady_wake).
            UVLM::BiotSavart::surface(zeta,
                                      gamma,
                                      target_triad,
                                      temp_uout,
                                      0,
                                      0,
                                      gamma.rows(),
                                      gamma.cols(),
                                      image_method,
                                      vortex_radius);
            // Project each panel's induced velocity on the collocation
            // normal and scatter it into the AIC block.
            for (unsigned int i_surf=0; i_surf<surf_rows; ++i_surf)
            {
                for (unsigned int j_surf=0; j_surf<surf_cols; ++j_surf)
                {
                    const int surface_counter = j_surf + i_surf*surf_cols;
                    uout(collocation_counter, surface_counter) +=
                        temp_uout[0](i_surf, j_surf)*normal[0](i_col, j_col) +
                        temp_uout[1](i_surf, j_surf)*normal[1](i_col, j_col) +
                        temp_uout[2](i_surf, j_surf)*normal[2](i_col, j_col);
                }
            }
        }
    }
}
template <typename t_zeta,
          typename t_zeta_star,
          typename t_gamma,
          typename t_gamma_star,
          typename t_tsurface,
          typename t_uout,
          typename t_normals>
// Assembles the normal-projected AIC block of one source lattice plus its
// steady wake on the collocation points of target_surface. Same layout as
// multisurface: rows index collocation points, columns index source panels.
void UVLM::BiotSavart::multisurface_steady_wake
(
    const t_zeta& zeta,
    const t_zeta_star& zeta_star,
    const t_gamma& gamma,
    const t_gamma_star& gamma_star,
    const t_tsurface& target_surface,
    const bool& horseshoe,
    t_uout& uout,
    const bool& image_method,
    const t_normals& normal,
    const UVLM::Types::Real& vortex_radius
)
{
    const unsigned int rows_collocation = target_surface[0].rows();
    const unsigned int cols_collocation = target_surface[0].cols();
    const uint surf_rows = gamma.rows();
    const uint surf_cols = gamma.cols();
    #pragma omp parallel for collapse(2)
    for (unsigned int i_col=0; i_col<rows_collocation; ++i_col)
    {
        for (unsigned int j_col=0; j_col<cols_collocation; ++j_col)
        {
            // Thread-local scratch: per-panel influence on this point.
            UVLM::Types::Vector3 target_triad;
            UVLM::Types::VecMatrixX temp_uout;
            UVLM::Types::allocate_VecMat(temp_uout, zeta, -1);
            UVLM::Types::initialise_VecMat(temp_uout, 0.0);
            const int collocation_counter = j_col + i_col*cols_collocation;
            target_triad << target_surface[0](i_col, j_col),
                            target_surface[1](i_col, j_col),
                            target_surface[2](i_col, j_col);
            // BUG FIX: image_method was previously omitted from this call,
            // so vortex_radius was implicitly converted to bool and bound
            // to the image_method parameter, and the intended vortex radius
            // never reached the kernel (cf. multisurface_unsteady_wake).
            UVLM::BiotSavart::surface_with_steady_wake(zeta,
                                                       zeta_star,
                                                       gamma,
                                                       gamma_star,
                                                       target_triad,
                                                       horseshoe,
                                                       temp_uout,
                                                       image_method,
                                                       vortex_radius);
            // Project on the collocation normal and scatter into the AIC.
            for (unsigned int i_surf=0; i_surf<surf_rows; ++i_surf)
            {
                for (unsigned int j_surf=0; j_surf<surf_cols; ++j_surf)
                {
                    const int surface_counter = i_surf*surf_cols + j_surf;
                    uout(collocation_counter, surface_counter) +=
                        temp_uout[0](i_surf, j_surf)*normal[0](i_col, j_col) +
                        temp_uout[1](i_surf, j_surf)*normal[1](i_col, j_col) +
                        temp_uout[2](i_surf, j_surf)*normal[2](i_col, j_col);
                }
            }
        }
    }
}
template <typename t_zeta,
          typename t_zeta_star,
          typename t_gamma,
          typename t_gamma_star,
          typename t_tsurface,
          typename t_uout,
          typename t_normals>
// Assembles the normal-projected AIC block of one source lattice plus the
// first n_rows rows of its unsteady wake (-1 = all rows) on the collocation
// points of target_surface. Same layout as multisurface.
void UVLM::BiotSavart::multisurface_unsteady_wake
(
    const t_zeta& zeta,
    const t_zeta_star& zeta_star,
    const t_gamma& gamma,
    const t_gamma_star& gamma_star,
    const t_tsurface& target_surface,
    t_uout& uout,
    const bool& image_method,
    const t_normals& normal,
    const int& n_rows,
    const UVLM::Types::Real& vortex_radius
)
{
    const unsigned int rows_collocation = target_surface[0].rows();
    const unsigned int cols_collocation = target_surface[0].cols();
    // CLEANUP: removed the function-scope temp_uout/target_triad that were
    // allocated here but shadowed by the per-thread copies declared inside
    // the parallel loop; they were dead (and never initialised).
    const unsigned int surf_rows = gamma.rows();
    const unsigned int surf_cols = gamma.cols();
    #pragma omp parallel for collapse(2)
    for (unsigned int i_col=0; i_col<rows_collocation; ++i_col)
    {
        for (unsigned int j_col=0; j_col<cols_collocation; ++j_col)
        {
            // Thread-local scratch: per-panel influence on this point.
            UVLM::Types::Vector3 target_triad;
            UVLM::Types::VecMatrixX temp_uout;
            UVLM::Types::allocate_VecMat(temp_uout, zeta, -1);
            UVLM::Types::initialise_VecMat(temp_uout, 0.0);
            const int collocation_counter = j_col + i_col*cols_collocation;
            target_triad << target_surface[0](i_col, j_col),
                            target_surface[1](i_col, j_col),
                            target_surface[2](i_col, j_col);
            UVLM::BiotSavart::surface_with_unsteady_wake(zeta,
                                                         zeta_star,
                                                         gamma,
                                                         gamma_star,
                                                         target_triad,
                                                         temp_uout,
                                                         image_method,
                                                         n_rows,
                                                         vortex_radius
                                                         );
            // Project on the collocation normal and scatter into the AIC.
            for (unsigned int i_surf=0; i_surf<surf_rows; ++i_surf)
            {
                for (unsigned int j_surf=0; j_surf<surf_cols; ++j_surf)
                {
                    const int surface_counter = i_surf*surf_cols + j_surf;
                    uout(collocation_counter, surface_counter) +=
                        temp_uout[0](i_surf, j_surf)*normal[0](i_col, j_col) +
                        temp_uout[1](i_surf, j_surf)*normal[1](i_col, j_col) +
                        temp_uout[2](i_surf, j_surf)*normal[2](i_col, j_col);
                }
            }
        }
    }
}
// template <typename t_zeta,
// typename t_gamma,
// typename t_zeta_col,
// typename t_u_ind>
// void UVLM::BiotSavart::multisurface_on_multisurface
// (
// const t_zeta& zeta,
// const t_gamma& gamma,
// const t_zeta_col& zeta_col,
// const bool image_method,
// t_u_ind& u_ind
// )
// {
//
// }
template <typename t_zeta,
          typename t_gamma,
          typename t_zeta_col,
          typename t_u_ind>
// Accumulates, at every point of the target grid zeta_col, the velocity
// induced by the entire vortex lattice (zeta, gamma).
void UVLM::BiotSavart::whole_surface_on_surface
(
    const t_zeta& zeta,
    const t_gamma& gamma,
    const t_zeta_col& zeta_col,
    t_u_ind& u_ind,
    const bool image_method,
    const UVLM::Types::Real& vortex_radius
)
{
    const uint n_rows = zeta_col[0].rows();
    const uint n_cols = zeta_col[0].cols();
    // Each target point is independent: evaluate them in parallel.
    #pragma omp parallel for collapse(2)
    for (uint row = 0; row < n_rows; ++row)
    {
        for (uint col = 0; col < n_cols; ++col)
        {
            UVLM::Types::Vector3 point;
            point << zeta_col[0](row, col),
                     zeta_col[1](row, col),
                     zeta_col[2](row, col);
            const UVLM::Types::Vector3 vel =
                UVLM::BiotSavart::whole_surface(zeta,
                                                gamma,
                                                point,
                                                image_method,
                                                vortex_radius);
            u_ind[0](row, col) += vel(0);
            u_ind[1](row, col) += vel(1);
            u_ind[2](row, col) += vel(2);
        }
    }
}
template <typename t_zeta,
typename t_gamma,
typename t_ttriad>
// Total velocity induced at target_triad by one whole lattice (zeta, gamma).
// Instead of summing full rings per panel, each interior segment is visited
// once with the NET circulation of the two panels sharing it (difference of
// gammas), and the open edges of the grid are closed by the boundary loops
// below. This halves the number of segment evaluations vs. per-ring sums.
UVLM::Types::Vector3 UVLM::BiotSavart::whole_surface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_ttriad& target_triad,
const bool& image_method,
const UVLM::Types::Real& vortex_radius,
unsigned int Mstart,
unsigned int Nstart
)
{
// The block always extends to the full surface dimensions taken from gamma.
uint Mend = gamma.rows();
uint Nend = gamma.cols();
UVLM::Types::Vector3 uout;
uout.setZero();
UVLM::Types::Vector3 temp_uout;
UVLM::Types::Vector3 v1;
UVLM::Types::Vector3 v2;
UVLM::Types::Real delta_gamma;
for (unsigned int i=Mstart; i<Mend; ++i)
{
for (unsigned int j=Nstart; j<Nend; ++j)
{
// Spanwise vortices: net circulation is the chordwise gamma jump
// between panel (i, j) and its upstream neighbour (i-1, j).
v1 << zeta[0](i, j),
zeta[1](i, j),
zeta[2](i, j);
v2 << zeta[0](i, j+1),
zeta[1](i, j+1),
zeta[2](i, j+1);
if (i == Mstart){
delta_gamma = gamma(i, j);
} else {
delta_gamma = gamma(i, j) - gamma(i-1, j);
}
uout += UVLM::BiotSavart::segment(target_triad,
v1,
v2,
-delta_gamma,
vortex_radius);
// Streamwise/chordwise vortices: net circulation is the spanwise
// gamma jump between panel (i, j-1) and panel (i, j).
v2 << zeta[0](i+1, j),
zeta[1](i+1, j),
zeta[2](i+1, j);
if (j == Nstart){
delta_gamma = -gamma(i, j);
} else {
delta_gamma = gamma(i, j-1) - gamma(i, j);
}
uout += UVLM::BiotSavart::segment(target_triad,
v1,
v2,
-delta_gamma,
vortex_radius);
}
}
// Closing spanwise segments along the last grid row (trailing edge):
// only the last panel row contributes, with full circulation.
for (unsigned int j=Nstart; j<Nend; ++j)
{
// Spanwise vortices
v1 << zeta[0](Mend, j),
zeta[1](Mend, j),
zeta[2](Mend, j);
v2 << zeta[0](Mend, j+1),
zeta[1](Mend, j+1),
zeta[2](Mend, j+1);
uout += UVLM::BiotSavart::segment(target_triad,
v1,
v2,
gamma(Mend-1,j),
vortex_radius);
}
// Closing chordwise segments along the last grid column.
for (unsigned int i=Mstart; i<Mend; ++i)
{
// Streamwise/chordwise vortices
v1 << zeta[0](i, Nend),
zeta[1](i, Nend),
zeta[2](i, Nend);
v2 << zeta[0](i+1, Nend),
zeta[1](i+1, Nend),
zeta[2](i+1, Nend);
uout += UVLM::BiotSavart::segment(target_triad,
v1,
v2,
-gamma(i, Nend-1),
vortex_radius);
}
return uout;
}
template <typename t_zeta,
          typename t_zeta_star,
          typename t_gamma,
          typename t_gamma_star,
          typename t_uout>
// Velocity induced on every wake grid by all bound surfaces and all wakes.
// uout[k] accumulates the result on the grid of wake k.
void UVLM::BiotSavart::total_induced_velocity_on_wake
(
    const t_zeta& zeta,
    const t_zeta_star& zeta_star,
    const t_gamma& gamma,
    const t_gamma_star& gamma_star,
    t_uout& uout,
    const bool& image_method,
    const UVLM::Types::Real& vortex_radius
)
{
    const uint n_surf = zeta.size();
    for (uint i_target = 0; i_target < n_surf; ++i_target)
    {
        auto& target_grid = zeta_star[i_target];
        auto& target_uout = uout[i_target];
        for (uint i_source = 0; i_source < n_surf; ++i_source)
        {
            // wake i_source on wake i_target
            UVLM::BiotSavart::whole_surface_on_surface(zeta_star[i_source],
                                                       gamma_star[i_source],
                                                       target_grid,
                                                       target_uout,
                                                       image_method,
                                                       vortex_radius);
            // bound surface i_source on wake i_target
            UVLM::BiotSavart::whole_surface_on_surface(zeta[i_source],
                                                       gamma[i_source],
                                                       target_grid,
                                                       target_uout,
                                                       image_method,
                                                       vortex_radius);
        }
    }
}
template <typename t_ttriad,
          typename t_zeta,
          typename t_zeta_star,
          typename t_gamma,
          typename t_gamma_star>
// Sum of the velocities induced at a single point by every bound surface
// and every wake of the system.
UVLM::Types::Vector3 UVLM::BiotSavart::total_induced_velocity_on_point
(
    const t_ttriad& target_triad,
    const t_zeta& zeta,
    const t_zeta_star& zeta_star,
    const t_gamma& gamma,
    const t_gamma_star& gamma_star,
    const bool& image_method,
    const UVLM::Types::Real& vortex_radius
)
{
    UVLM::Types::Vector3 total;
    total.setZero();
    const uint n_surf = zeta.size();
    for (uint surf = 0; surf < n_surf; ++surf)
    {
        // wake contribution
        total += UVLM::BiotSavart::whole_surface(zeta_star[surf],
                                                 gamma_star[surf],
                                                 target_triad,
                                                 image_method,
                                                 vortex_radius);
        // bound-surface contribution
        total += UVLM::BiotSavart::whole_surface(zeta[surf],
                                                 gamma[surf],
                                                 target_triad,
                                                 image_method,
                                                 vortex_radius);
    }
    return total;
}
namespace UVLMlin{
// Local aliases for the math constants used by the linearised-UVLM kernels.
const double PI = UVLM::Constants::PI;
const double PIquart = UVLM::Constants::INV_PI4; // 1/(4*pi): Biot-Savart prefactor
// Panel connectivity tables (Nvert = 4 vertices per quadrilateral panel).
const int svec[Nvert]={0, 1, 2, 3}; // segment number
const int avec[Nvert]={0, 1, 2, 3}; // start vertex of each segment
const int bvec[Nvert]={1, 2, 3, 0}; // end vertex of each segment
// Offsets from panel index (m, n) to the grid indices of each vertex.
const int dm[Nvert]={0,1,1,0};
const int dn[Nvert]={0,0,1,1};
// Velocity induced at point zetaP by one quadrilateral vortex panel of
// circulation gamma, accumulated into velP. Uses the straight-segment
// Biot-Savart formula segment by segment around the panel perimeter.
void biot_panel_map( map_RowVec3& velP,
const map_RowVec3 zetaP,
const map_Mat4by3 ZetaPanel,
const double gamma,
double vortex_radius){
/*
This implementation works with mapping objects.
*/
// declarations
int ii,aa,bb;
const double Cbiot=PIquart*gamma; // gamma/(4*pi)
double vcr2; // squared norm of R_a x R_b
RowVector3d RAB, Vcr;
Vector3d Vsc;
Vector4d RABsq;
Matrix4by3d R; // vectors P - vertex matrix
Matrix4by3d Runit; // unit vectors P - vertex matrix
// We keep vortex_radius_sq = vortex_radius. We have found accuracy issues
// when vortex_radius_sq = vortex_radius*vortex_radius;
// We think this is a limit for numerical accuracy so it makes
// sense to keep it vortex_radius_sq = vortex_radius;
double vortex_radius_sq = vortex_radius;
// ----------------------------------------------- Compute common variables
// these are constants or variables depending only on vertices and P coords
for(ii=0;ii<Nvert;ii++){
R.row(ii)=zetaP-ZetaPanel.row(ii);
Runit.row(ii)=R.row(ii)/R.row(ii).norm();
}
// -------------------------------------------------- Loop through segments
for(ii=0;ii<Nvert;ii++){
aa=avec[ii];
bb=bvec[ii];
RAB=ZetaPanel.row(bb)-ZetaPanel.row(aa); // segment vector
Vcr=R.row(aa).cross(R.row(bb));
vcr2=Vcr.dot(Vcr);
// Vortex-core cutoff: skip the segment if P is (nearly) on its axis.
if (vcr2<vortex_radius_sq*RAB.dot(RAB)) continue;
velP += ((Cbiot/vcr2) * RAB.dot(Runit.row(aa)-Runit.row(bb))) *Vcr;
}
}
// -----------------------------------------------------------------------------
// Derivatives of the panel-induced velocity wrt the collocation point
// coordinates (DerP, 3x3, accumulated) and wrt each panel vertex
// (DerVertices[v], 3x3 each, accumulated). Mirrors biot_panel_map.
void der_biot_panel( Matrix3d& DerP, Matrix3d DerVertices[Nvert],
const RowVector3d zetaP, const Matrix4by3d ZetaPanel, const double gamma, double vortex_radius){
/* This implementation is not suitable for the python interface */
// declarations
int ii,aa,bb;
const double Cbiot=PIquart*gamma; // gamma/(4*pi)
double r1inv, vcr2, vcr2inv, vcr4inv, dotprod, diag_fact, off_fact;
RowVector3d RAB, Vcr, Tv;
Vector3d Vsc;
Matrix3d Dvcross, Ddiff, dQ_dRA, dQ_dRB, dQ_dRAB;
Matrix4by3d R; // vectors P - vertex matrix
Matrix4by3d Runit; // unit vectors P - vertex matrix
Matrix3d Array_Der_runit[Nvert]; // as a static arrays (we know size)
// We keep vortex_radius_sq = vortex_radius. We have found accuracy issues
// when vortex_radius_sq = vortex_radius*vortex_radius;
// We think this is a limit for numerical accuracy so it makes
// sense to keep it vortex_radius_sq = vortex_radius;
double vortex_radius_sq = vortex_radius;
// ----------------------------------------------- Compute common variables
// these are constants or variables depending only on vertices and P coords
for(ii=0;ii<Nvert;ii++){
R.row(ii)=zetaP-ZetaPanel.row(ii);
r1inv=1./R.row(ii).norm();
Runit.row(ii)=R.row(ii)*r1inv;
// d(runit)/d(r) for this vertex, reused for every segment below.
der_runit( Array_Der_runit[ii], R.row(ii), r1inv, -std::pow(r1inv,3) );
}
// -------------------------------------------------- Loop through segments
for(ii=0;ii<Nvert;ii++){
// vertices indices
aa=avec[ii];
bb=bvec[ii];
// utility vars
RAB=ZetaPanel.row(bb)-ZetaPanel.row(aa); // segment vector
Vcr=R.row(aa).cross(R.row(bb));
vcr2=Vcr.dot(Vcr);
// Vortex-core cutoff consistent with biot_panel_map.
if (vcr2<vortex_radius_sq*RAB.dot(RAB)){
//cout << endl << "Skipping seg. " << ii << endl;
continue;}
Tv=Runit.row(aa)-Runit.row(bb);
dotprod=RAB.dot(Tv);
// ------------------------------------------ cross-product derivatives
// lower triangular part only (Dvcross_by_skew3d exploits the symmetry)
vcr2inv=1./vcr2;
vcr4inv=vcr2inv*vcr2inv;
diag_fact= Cbiot*vcr2inv*dotprod;
off_fact =-2.*Cbiot*vcr4inv*dotprod;
Dvcross(0,0)=diag_fact+off_fact*Vcr[0]*Vcr[0];
Dvcross(1,0)=off_fact*Vcr[0]*Vcr[1];
Dvcross(1,1)=diag_fact+off_fact*Vcr[1]*Vcr[1];
Dvcross(2,0)=off_fact*Vcr[0]*Vcr[2];
Dvcross(2,1)=off_fact*Vcr[1]*Vcr[2];
Dvcross(2,2)= diag_fact+off_fact*Vcr[2]*Vcr[2];
// ------------------------------- difference and RAB terms derivatives
Vsc=Vcr.transpose()*vcr2inv*Cbiot;
Ddiff=Vsc*RAB;
dQ_dRAB=Vsc*Tv;
// ----------------------------------------------------- Final assembly
dQ_dRA=Dvcross_by_skew3d(Dvcross,-R.row(bb))+Ddiff*Array_Der_runit[aa];
dQ_dRB=Dvcross_by_skew3d(Dvcross, R.row(aa))-Ddiff*Array_Der_runit[bb];
// Chain rule: R = zetaP - vertex, hence the sign flips below.
DerP += dQ_dRA + dQ_dRB;
DerVertices[aa] -= dQ_dRAB + dQ_dRA;
DerVertices[bb] += dQ_dRAB - dQ_dRB;
}
}
// Same computation as der_biot_panel, but working on Eigen::Map wrappers so
// it can be driven from externally-owned buffers (python interface path).
// DerP (3x3) and DerVertices[v] (3x3 each) are accumulated, not overwritten.
void der_biot_panel_map( map_Mat3by3& DerP,
Vec_map_Mat3by3& DerVertices,
const map_RowVec3 zetaP,
const map_Mat4by3 ZetaPanel,
const double gamma,
double vortex_radius){
/*
This implementation works with mapping objects.
*/
// declarations
int ii,aa,bb;
const double Cbiot=PIquart*gamma; // gamma/(4*pi)
double r1inv, vcr2, vcr2inv, vcr4inv, dotprod, diag_fact, off_fact;
RowVector3d RAB, Vcr, Tv;
Vector3d Vsc;
Matrix3d Dvcross, Ddiff, dQ_dRA, dQ_dRB, dQ_dRAB;
Matrix4by3d R; // vectors P - vertex matrix
Matrix4by3d Runit; // unit vectors P - vertex matrix
Matrix3d Array_Der_runit[Nvert]; // as a static arrays (we know size)
// We keep vortex_radius_sq = vortex_radius. We have found accuracy issues
// when vortex_radius_sq = vortex_radius*vortex_radius;
// We think this is a limit for numerical accuracy so it makes
// sense to keep it vortex_radius_sq = vortex_radius;
double vortex_radius_sq = vortex_radius;
// ----------------------------------------------- Compute common variables
// these are constants or variables depending only on vertices and P coords
for(ii=0;ii<Nvert;ii++){
R.row(ii)=zetaP-ZetaPanel.row(ii);
r1inv=1./R.row(ii).norm();
Runit.row(ii)=R.row(ii)*r1inv;
// d(runit)/d(r) for this vertex, reused for every segment below.
der_runit( Array_Der_runit[ii], R.row(ii), r1inv, -std::pow(r1inv,3) );
}
// -------------------------------------------------- Loop through segments
for(ii=0;ii<Nvert;ii++){
// vertices indices
aa=avec[ii];
bb=bvec[ii];
// utility vars
RAB=ZetaPanel.row(bb)-ZetaPanel.row(aa); // segment vector
Vcr=R.row(aa).cross(R.row(bb));
vcr2=Vcr.dot(Vcr);
// Vortex-core cutoff consistent with biot_panel_map.
if (vcr2<vortex_radius_sq*RAB.dot(RAB)){
//cout << endl << "Skipping seg. " << ii << endl;
continue;}
Tv=Runit.row(aa)-Runit.row(bb);
dotprod=RAB.dot(Tv);
// ------------------------------------------ cross-product derivatives
// lower triangular part only (Dvcross_by_skew3d exploits the symmetry)
vcr2inv=1./vcr2;
vcr4inv=vcr2inv*vcr2inv;
diag_fact= Cbiot*vcr2inv*dotprod;
off_fact =-2.*Cbiot*vcr4inv*dotprod;
Dvcross(0,0)=diag_fact+off_fact*Vcr[0]*Vcr[0];
Dvcross(1,0)=off_fact*Vcr[0]*Vcr[1];
Dvcross(1,1)=diag_fact+off_fact*Vcr[1]*Vcr[1];
Dvcross(2,0)=off_fact*Vcr[0]*Vcr[2];
Dvcross(2,1)=off_fact*Vcr[1]*Vcr[2];
Dvcross(2,2)= diag_fact+off_fact*Vcr[2]*Vcr[2];
// ------------------------------- difference and RAB terms derivatives
Vsc=Vcr.transpose()*vcr2inv*Cbiot;
Ddiff=Vsc*RAB;
dQ_dRAB=Vsc*Tv;
// ----------------------------------------------------- Final assembly
dQ_dRA=Dvcross_by_skew3d(Dvcross,-R.row(bb))+Ddiff*Array_Der_runit[aa];
dQ_dRB=Dvcross_by_skew3d(Dvcross, R.row(aa))-Ddiff*Array_Der_runit[bb];
//cout << endl << "dQ_dRA = " << endl << dQ_dRA << endl;
// Chain rule: R = zetaP - vertex, hence the sign flips below.
DerP += dQ_dRA + dQ_dRB;
DerVertices[aa] -= dQ_dRAB + dQ_dRA;
DerVertices[bb] += dQ_dRAB - dQ_dRB;
}
}
// -----------------------------------------------------------------------------
// Sub-functions
// Derivative of the unit vector r/|r| wrt r, written into Der (symmetric
// 3x3): Der = I/|r| - r r^T / |r|^3. rinv = 1/|r| and
// minus_rinv3 = -1/|r|^3 are supplied precomputed by the caller.
void der_runit(Matrix3d& Der,const RowVector3d& rv, double rinv,double minus_rinv3){
/*Warning:
1. RowVector3d needs to be defined as constant if in the main code RowVector
is a row of a matrix.
2. The function fails if Matrix3d is a sub-block of a matrix.
*/
// alloc upper diagonal part
Der(0,0)=rinv+minus_rinv3*rv(0)*rv(0);
Der(0,1)= minus_rinv3*rv(0)*rv(1);
Der(0,2)= minus_rinv3*rv(0)*rv(2);
Der(1,1)=rinv+minus_rinv3*rv(1)*rv(1);
Der(1,2)= minus_rinv3*rv(1)*rv(2);
Der(2,2)=rinv+minus_rinv3*rv(2)*rv(2);
// alloc lower diag: mirror the upper triangle (matrix is symmetric)
Der(1,0)=Der(0,1);
Der(2,0)=Der(0,2);
Der(2,1)=Der(1,2);
}
// Product of Dvcross with the skew-symmetric (cross-product) matrix of rv.
// NOTE(review): only the lower triangle of Dvcross is read, and the result
// is filled assuming the symmetry the callers build into Dvcross
// (see der_biot_panel*, which populate the lower triangle only) -- this
// routine is not valid for a general, non-symmetric Dvcross.
Matrix3d Dvcross_by_skew3d(const Matrix3d& Dvcross, const RowVector3d& rv){
/*Warning:
1. RowVector3d needs to be defined as constant if in the main code RowVector
is a row of a matrix.
*/
Matrix3d P;
P(0,0)=Dvcross(1,0)*rv(2)-Dvcross(2,0)*rv(1);
P(0,1)=Dvcross(2,0)*rv(0)-Dvcross(0,0)*rv(2);
P(0,2)=Dvcross(0,0)*rv(1)-Dvcross(1,0)*rv(0);
//
P(1,0)=P(0,1);
P(1,1)=Dvcross(2,1)*rv(0)-Dvcross(1,0)*rv(2);
P(1,2)=Dvcross(1,0)*rv(1)-Dvcross(1,1)*rv(0);
//
P(2,0)=P(0,2);
P(2,1)=P(1,2);
P(2,2)=Dvcross(2,0)*rv(1)-Dvcross(2,1)*rv(0);
return P;
}
// -----------------------------------------------------------------------------
// Derivative of the velocity induced at collocation point zetaC by lattice
// (ZetaIn, GammaIn) wrt: (a) the collocation point coords -> DerC (3x3,
// accumulated) and (b) the lattice vertex coords -> DerV (3 x 3*Kzeta_in,
// accumulated; column layout is component*Kzeta + row*(N+1) + col).
// IsBound selects between a bound surface (all vertices are DOFs) and a
// wake, where only the trailing-edge vertices of the first wake row map
// back onto bound-surface DOFs (hence Kzeta_in_bound / M_in_bound).
void dvinddzeta(map_Mat3by3 DerC,
map_Mat DerV,
const map_RowVec3 zetaC,
Vec_map_Mat ZetaIn,
map_Mat GammaIn,
int& M_in,
int& N_in,
int& Kzeta_in,
bool& IsBound,
int& M_in_bound, // M of bound surf associated
int& Kzeta_in_bound,
double vortex_radius
)
{
int cc, vv, mm, nn, jj, cc_in; //pp
//int Kin=M_in*N_in;
// below defined as maps to keep compatibility with der_biot_panel_map
//Matrix4by3d ZetaPanel_in;
//Matrix3d derv[Nvert];
double p_ZetaPanel_in[12];
double p_derv[36];
map_Mat4by3 ZetaPanel_in(p_ZetaPanel_in);
Vec_map_Mat3by3 derv;
for(vv=0;vv<4;vv++) derv.push_back( map_Mat3by3(p_derv+9*vv) );
/* cout << "Kzeta_in=" << endl << Kzeta_in << endl;
cout << "DerV = " << endl << DerV << endl;
cout << "GammaIn = " << endl << GammaIn << endl;
for(cc=0;cc<3;cc++){
cout << "ZetaIn[" << cc << "] = " << endl << ZetaIn[cc] << endl;
}*/
if (IsBound){// ------------------------------------------------ Bound case
// Loop panels (mm,nn)
for (mm=0; mm<M_in; mm++){
for (nn=0; nn<N_in; nn++){
//pp=mm*N_in+nn; // panel no.
// get panel coords in 4x3 format
for(cc=0; cc<3; cc++){
for(vv=0; vv<Nvert; vv++){
ZetaPanel_in(vv,cc)=ZetaIn[cc](mm+dm[vv],nn+dn[vv]);
}
}
// init. local derivatives
for(vv=0; vv<Nvert; vv++) derv[vv].setZero();
// get local deriv
der_biot_panel_map(DerC,derv,zetaC,ZetaPanel_in,GammaIn(mm,nn), vortex_radius);
//for(vv=0; vv<Nvert; vv++) cout << derv[vv] << endl;
// Scatter vertex derivatives into the flattened DerV columns.
for(cc=0; cc<3; cc++){
for(vv=0; vv<Nvert; vv++){
for(cc_in=0; cc_in<3; cc_in++){
jj= cc_in*Kzeta_in + (mm+dm[vv])*(N_in+1) + (nn+dn[vv]);
DerV(cc,jj)+=derv[vv](cc,cc_in);
}
}
}
}
}
} else{ // ------------------------------------------------------ Wake case
// scan TE first
mm=0;
for (nn=0; nn<N_in; nn++){
//pp=mm*N_in+nn; // panel no.
// get panel coords in 4x3 format
for(cc=0; cc<3; cc++){
for(vv=0; vv<Nvert; vv++){
ZetaPanel_in(vv,cc)=ZetaIn[cc](mm+dm[vv],nn+dn[vv]);
}
}
// init. local derivatives. only vertices 0 and 3 are on TE
derv[0].setZero();
derv[3].setZero();
// get local deriv
der_biot_panel_map(DerC,derv,zetaC,ZetaPanel_in,GammaIn(mm,nn), vortex_radius);
// Map TE wake vertices onto the bound surface's trailing-edge row.
for(cc=0; cc<3; cc++){
for(cc_in=0; cc_in<3; cc_in++){
// vv=0
jj= cc_in*Kzeta_in_bound + M_in_bound*(N_in+1) + (nn);
DerV(cc,jj)+=derv[0](cc,cc_in);
// vv=3
jj= cc_in*Kzeta_in_bound + M_in_bound*(N_in+1) + (nn+1);
DerV(cc,jj)+=derv[3](cc,cc_in);
}
}
}
// Loop other panels (mm,nn) for colloc point
// Only DerC is accumulated here: wake vertices beyond the TE are not
// DOFs. NOTE(review): derv is passed without being re-zeroed -- its
// contents are discarded, and DerC does not depend on derv's initial
// values, so this looks harmless but is worth confirming.
for (mm=1; mm<M_in; mm++){
for (nn=0; nn<N_in; nn++){
// get panel coords in 4x3 format
for(cc=0; cc<3; cc++){
for(vv=0; vv<Nvert; vv++){
ZetaPanel_in(vv,cc)=ZetaIn[cc](mm+dm[vv],nn+dn[vv]);
}
}
// update DerC
der_biot_panel_map(DerC,derv,zetaC,ZetaPanel_in,GammaIn(mm,nn), vortex_radius);
}// loop nn
}// loop mm
}// if-else
}
// Unit-circulation influence coefficients: for every panel of the input
// lattice, the three velocity components it induces at zetaC are written
// into the corresponding column of AIC3 (column index = mm*N_in + nn).
void aic3( map_Mat AIC3,
const map_RowVec3 zetaC,
Vec_map_Mat ZetaIn,
int& M_in,
int& N_in,
double vortex_radius)
{
    // Stack buffers wrapped in maps, matching the kernel's interface.
    double panel_buf[12];
    map_Mat4by3 panel(panel_buf);
    double vel_buf[3];
    map_RowVec3 vel(vel_buf);

    for (int mm = 0; mm < M_in; mm++){
        for (int nn = 0; nn < N_in; nn++){
            // Gather the four panel corners into 4x3 format.
            for (int cc = 0; cc < 3; cc++){
                for (int vv = 0; vv < Nvert; vv++){
                    panel(vv, cc) = ZetaIn[cc](mm + dm[vv], nn + dn[vv]);
                }
            }
            // Evaluate the panel with unit circulation and store the result.
            vel.setZero();
            biot_panel_map(vel, zetaC, panel, 1.0, vortex_radius);
            AIC3.col(mm * N_in + nn) = vel;
        }
    }
}
// Total velocity induced at zetaC by the whole lattice (ZetaIn, GammaIn),
// accumulated panel by panel into velC (velC is NOT zeroed here).
void ind_vel(map_RowVec3 velC,
const map_RowVec3 zetaC,
Vec_map_Mat ZetaIn,
map_Mat GammaIn,
int& M_in,
int& N_in,
double vortex_radius)
{
    // Stack buffer wrapped in a map, matching the kernel's interface.
    double panel_buf[12];
    map_Mat4by3 panel(panel_buf);

    for (int mm = 0; mm < M_in; mm++){
        for (int nn = 0; nn < N_in; nn++){
            // Gather the four panel corners into 4x3 format.
            for (int cc = 0; cc < 3; cc++){
                for (int vv = 0; vv < Nvert; vv++){
                    panel(vv, cc) = ZetaIn[cc](mm + dm[vv], nn + dn[vv]);
                }
            }
            // Accumulate this panel's contribution with its circulation.
            biot_panel_map(velC, zetaC, panel, GammaIn(mm, nn), vortex_radius);
        }
    }
}
}
|
nvptx_device_cmath_functions.c | // Test calling of device math functions.
///==========================================================================///
// REQUIRES: nvptx-registered-target
// RUN: %clang_cc1 -internal-isystem %S/Inputs/include -fopenmp -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -internal-isystem %S/../../lib/Headers/openmp_wrappers -include __clang_openmp_device_functions.h -internal-isystem %S/Inputs/include -fopenmp -triple nvptx64-nvidia-cuda -aux-triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -check-prefix CHECK-YES %s
#include <stdlib.h>
#include <math.h>
// Inside the OpenMP target region each libm call must lower to the matching
// CUDA libdevice intrinsic (__nv_*); the CHECK-YES lines assert this in the
// emitted IR and must not be altered.
void test_sqrt(double a1) {
#pragma omp target
{
// CHECK-YES: call double @__nv_sqrt(double
double l1 = sqrt(a1);
// CHECK-YES: call double @__nv_pow(double
double l2 = pow(a1, a1);
// CHECK-YES: call double @__nv_modf(double
double l3 = modf(a1 + 3.5, &a1);
// CHECK-YES: call double @__nv_fabs(double
double l4 = fabs(a1);
// CHECK-YES: call i32 @__nv_abs(i32
double l5 = abs((int)a1);
}
}
|
parallel-reduction-nowait.c | /*
* parallel-reduction-nowait.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
// Race-detection testcase: worksharing reductions, one with nowait, inside
// a parallel reduction. Each thread sees sum1 = sum2 = 10 after the
// reductions (0+1+2+3+4), sets var = 20, and the parallel reduction over
// 5 threads yields var == 100. Archer must report no data race.
int main(int argc, char *argv[]) {
int var = 0, i;
int sum1 = 0;
int sum2 = 0;
// Number of threads is empirical: We need enough threads so that
// the reduction is really performed hierarchically in the barrier!
#pragma omp parallel num_threads(5) reduction(+ : var)
{
#pragma omp for schedule(static) nowait reduction(+ : sum1)
for (i = 0; i < 5; i++)
sum1 += i;
#pragma omp for schedule(static) reduction(+ : sum2)
for (i = 0; i < 5; i++)
sum2 += i;
var = sum1 + sum2;
}
fprintf(stderr, "DONE\n");
int error = (var != 100);
return error;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /*
    Allocate and zero-initialize the cache structure; allocation failure is
    fatal by design (callers never check for NULL).
  */
  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);  /* no backing disk file open yet */
  cache_info->id=GetMagickThreadId();
  /*
    Reserve at least one nexus per potential worker thread so concurrent
    pixel requests never share a nexus; the thread-resource limit and the
    OpenMP maximum can only raise the count, never lower it below the
    caller's request.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    The "cache:synchronize" policy is read after the environment variable
    and therefore takes precedence when both are set.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    Allocate 2*number_threads nexus structures in one contiguous slab
    anchored at *nexus_info: the first number_threads entries are the
    per-thread primary nexuses, and the second half provides each primary
    nexus with its associated virtual nexus.  DestroyPixelCacheNexus()
    relies on this layout when freeing.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the image's in-core pixel store together with its length in
    bytes.  Only memory- and map-backed caches expose their pixels directly;
    for any other cache type, NULL is returned and *length is zero.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Instantiate the cache component: lazily create the module-wide cache
    semaphore the first time this runs.
  */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /*
    Ensure the semaphore exists before relinquishing it: the component may
    be torn down without CacheComponentGenesis() ever having run, and
    RelinquishSemaphoreInfo() expects a valid semaphore.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply clip mask.  When the image has no write mask channel, or the
    nexus region is empty, there is nothing to clip and we succeed trivially.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the original (pre-update) pixels re-read from the cache via the
    virtual nexus; q walks the updated nexus pixels about to be synced back.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    if (p == (Quantum *) NULL)
      break;
    mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
    if (fabs(mask_alpha) >= MagickEpsilon)
      {
        /*
          Composite the updated pixel over the original one, weighted by the
          write mask, for every channel carrying the update trait; channels
          without it keep their new value untouched.
        */
        for (i=0; i < (ssize_t) image->number_channels; i++)
        {
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
            GetPixelAlpha(image,p),(double) q[i],(double)
            GetPixelAlpha(image,q)));
        }
        SetPixelAlpha(image,GetPixelAlpha(image,p),q);
      }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /* MagickFalse only when the loop exited early (p was NULL). */
  return(n < (ssize_t) number_pixels ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  /*
    Create a fresh pixel cache that inherits only the thread count and the
    virtual pixel method of the source cache; no pixel data is copied here.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination_info,
    *magick_restrict origin_info;

  /*
    Copy the pixel cache method table from `cache' into `clone'; everything
    else in the destination cache is left untouched.
  */
  assert(clone != (Cache) NULL);
  destination_info=(CacheInfo *) clone;
  assert(destination_info->signature == MagickCoreSignature);
  if (destination_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination_info->filename);
  assert(cache != (Cache) NULL);
  origin_info=(CacheInfo *) cache;
  assert(origin_info->signature == MagickCoreSignature);
  destination_info->methods=origin_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.  Both caches are
    opened (source read-only, clone read/write) and rewound before copying.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /*
    Copy in chunks of up to MagickMaxBufferExtent bytes, clamped to the
    source file size when fstat() can report it.
  */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;  /* short write: stop; the length check below reports failure */
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /*
    The clone succeeded only if every byte of the source cache was copied.
  */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
/*
  Thread-count clause for the parallel loops below: serial when not
  multithreaded, at most 2 threads when either cache is I/O bound (not
  memory- or map-backed), otherwise scaled by the row count.
*/
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
          (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /*
            Both stores are directly addressable: block-copy the pixels and,
            when present on both sides, the metacontent.
          */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* Matching channel maps allow a row memcpy instead of per-channel remap. */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /*
      Read one source row into this thread's nexus, then stage and write
      the corresponding clone row.
    */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: copy each clone channel from the
          source channel of the same semantic meaning, when the source
          defines it.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the image's pixel cache reference, if one is attached, and
    clear the pointer.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Deallocate the image pixel cache.  A registered destroy handler (e.g.
    for persistent caches) takes precedence; otherwise the cache reference
    is dropped directly.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  /*
    Close the disk-cache file descriptor, if one is open, and release the
    associated file resource.  Returns MagickFalse when no descriptor was
    open or when close(2) fails.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  status=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(status == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel store backing this cache according to its type, then
    reset the cache to an undefined state.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /* OpenCL owns the buffer; hand it back to the CL runtime. */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      /* Heap-backed unless the allocation was served by an anonymous map. */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /*
      NOTE: no break above -- MapCache deliberately falls through to
      DiskCache so the underlying cache file is also closed and the disk
      resource released (the map is backed by that file).
    */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the lock; only the thread that releases the
    last reference tears the cache down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Release resources in dependency order: pixel store first, then the
    distribute-cache/nexus/random structures, then the semaphores, and
    finally the cache structure itself.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickCoreSignature);  /* poison against reuse */
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free the nexus staging buffer -- an anonymous memory map or aligned
    heap memory, depending on how it was acquired -- and reset all of the
    nexus pixel bookkeeping fields.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /*
    Release each nexus staging buffer.  The nexus structures themselves
    live in one contiguous slab anchored at *nexus_info (see
    AcquirePixelCacheNexus), so only that slab and the pointer array are
    freed afterwards -- not the individual entries.
  */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent associated with the most recent authentic pixel
    request, delegating to a registered handler when one is installed;
    otherwise read it straight from this thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /*
    An undefined or shared cache must be synchronized (possibly cloned)
    before it can be handed to a device; re-read the cache pointer since
    SyncImagePixelCache may have replaced it.
  */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /*
    Only heap-resident (unmapped) memory caches can back an OpenCL buffer.
  */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /*
    If an OpenCL cache already exists but belongs to a different context,
    copy it so the buffer we return matches the requested device.
  */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /*
    Retain the buffer while still holding the semaphore so the caller's
    reference cannot race with a concurrent release.
  */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map the requested region into the given nexus and fill it from the cache
  backing store.  When the nexus aliases the authentic cache pixels in
  place, no transfer is needed.  Returns the pixels, or NULL on failure.
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);  /* nexus points straight at the cache; nothing to read */
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels corresponding with
% the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the authentic pixels staged by the most recent queue/get request,
  delegating to a registered handler when one is installed; otherwise fall
  through to the calling thread's nexus.
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  GetAuthenticPixelsFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_authentic_pixels_from_handler;
  if (handler != (GetAuthenticPixelsFromHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Obtain a pixel region for read/write access.  A registered handler takes
  precedence; otherwise the region is resolved through the calling thread's
  cache nexus.  Returns the pixels, or NULL on failure.
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Default cache method: fetch authentic pixels for the requested region
  through the calling thread's cache nexus.
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Report the extent of the region currently mapped by the calling thread's
  cache nexus (i.e. the pixels from the last queue/get request).
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Check that the image still matches the morphology of its pixel cache
  (storage class, colorspace, alpha trait, channels, geometry, channel map
  and metacontent extent).  Returns MagickTrue when they agree.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  cache_info=(CacheInfo *) image->cache;
  if (image->storage_class != cache_info->storage_class)
    return(MagickFalse);
  if (image->colorspace != cache_info->colorspace)
    return(MagickFalse);
  if (image->alpha_trait != cache_info->alpha_trait)
    return(MagickFalse);
  if (image->channels != cache_info->channels)
    return(MagickFalse);
  if (image->columns != cache_info->columns)
    return(MagickFalse);
  if (image->rows != cache_info->rows)
    return(MagickFalse);
  if (image->number_channels != cache_info->number_channels)
    return(MagickFalse);
  p=image->channel_map;
  q=cache_info->channel_map;
  if (memcmp(p,q,image->number_channels*sizeof(*p)) != 0)
    return(MagickFalse);
  if (image->metacontent_extent != cache_info->metacontent_extent)
    return(MagickFalse);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Process-wide throttle/time-limit state; lazily initialized on first use.
    NOTE(review): updates to these statics are not synchronized — presumably
    benign for throttling accuracy, but confirm against upstream intent.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /*
    Honor the CPU throttle by sleeping once every 32 calls.
  */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  /*
    Enforce the time resource limit: close any disk cache file and abort
    with a fatal exception once the allotted wall-clock time is exhausted.
  */
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: if the cache is shared or read-only, clone it so this
    image holds the only writable reference.  The condition is re-checked
    under the cache semaphore (double-checked locking pattern).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.  A stack-local shallow copy of the image is
            used so the clone can be opened without mutating *image until
            the clone is known to be good.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* optionally copy the pixel data, not just the structure */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /*
    Drop our reference to the old cache only after its semaphore is released.
  */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Report the backing-store type of the image's pixel cache (e.g. memory,
  map, disk, ping).
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Copy one pixel's channels from `source' into `destination'.  When the
  source is NULL (region fetch failed), the image background color is
  written instead and MagickFalse is returned.
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    n;

  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  /* scatter each stored channel to its canonical slot in the destination */
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
    destination[GetPixelChannelChannel(image,n)]=source[n];
  return(MagickTrue);
}
/*
  Fetch a single authentic pixel at (x,y) into `pixel'.  A registered
  handler takes precedence; on failure the background color is copied and
  MagickFalse is returned.
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Default cache method: fetch one authentic pixel at (x,y) through the
  calling thread's nexus; background color on failure.
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict q;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Fetch a single virtual pixel at (x,y) into `pixel' using the image's
  current virtual pixel method.  A registered handler takes precedence; on
  failure the background color is copied and MagickFalse is returned.
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const Quantum
    *q;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  q=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Default cache method: fetch one virtual pixel at (x,y) with the supplied
  virtual pixel method through the calling thread's nexus; background color
  on failure.
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const Quantum
    *q;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Fetch a single virtual pixel at (x,y) as a PixelInfo.  `pixel' is first
  initialized via GetPixelInfo(); MagickFalse is returned when the region
  cannot be read.
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register const Quantum
    *magick_restrict q;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  q=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (q == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,q,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  Report the colorspace recorded in the pixel cache, tracing the access when
  cache debugging is enabled.
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict info;
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  assert(cache_methods != (CacheMethods *) NULL);
  /*
    Zero the dispatch table, then install the default cache handlers:
    virtual (read-only) access, authentic (read/write) access, metacontent
    access, queue/sync, and teardown.
  */
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated
% corresponding with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
/*
  Return the pixel extent (width*height) of the nexus region; an empty
  region reports the extent of the entire cache instead.
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickSizeType
    extent;
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a pointer to the raw in-core pixel store (and its length in
  *length).  Only memory- and map-backed caches expose their pixels
  directly; any other cache type yields NULL.
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *length=info->length;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    return((void *) info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
/*
  Report the storage class (DirectClass or PseudoClass) recorded in the
  pixel cache, tracing the access when cache debugging is enabled.
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict info;
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
/*
  Return the optimized square cache tile size in pixels.  The tile is sized
  so one row of tile pixels fits a fixed byte budget: 2KB for in-core
  caches, 8KB for disk-backed caches (larger tiles amortize disk I/O).
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;
  size_t
    extent;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Bytes per pixel; computed once instead of per branch. */
  extent=cache_info->number_channels*sizeof(Quantum);
  *width=2048UL/extent;
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/extent;
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Report the virtual-pixel method currently configured on the image's pixel
  cache (how out-of-bounds pixel requests are satisfied).
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict info;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the metacontent associated with this thread's cache nexus (i.e.
  the most recent virtual/queued pixel request on this thread).
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
/*
  Return the metacontent buffer of the given cache nexus, or NULL when the
  cache has not been initialized (UndefinedClass).
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict info;
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  const void
    *magick_restrict metacontent;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer the registered handler; NOTE(review): the handler is invoked
    without a NULL check (unlike GetVirtualPixelQueue()) -- it is assumed
    to have been installed by GetPixelCacheMethods(); confirm.
  */
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent != (void *) NULL)
    return(metacontent);
  /* Fall back to this thread's own cache nexus. */
  assert(id < (int) cache_info->number_threads);
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 dither offset table (a permutation of 0..63).  DitherX()/DitherY()
  add DitherMatrix[coord & 0x07]-32 to an out-of-range coordinate, giving a
  deterministic, spatially-varying in-bounds replacement location.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };
/*
  Map x to an in-range column by adding a dither offset in [-32,31] and
  clamping the result to [0,columns-1].
*/
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;
  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  if (offset < 0L)
    return(0L);
  return(offset);
}
/*
  Map y to an in-range row by adding a dither offset in [-32,31] and
  clamping the result to [0,rows-1].
*/
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;
  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  if (offset < 0L)
    return(0L);
  return(offset);
}
/*
  Clamp x to the valid column range [0,columns-1] (edge replication).
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  if (x < 0L)
    return(0L);
  return(x);
}
/*
  Clamp y to the valid row range [0,rows-1] (edge replication).
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  if (y < 0L)
    return(0L);
  return(y);
}
/*
  Pseudo-random column index; assumes GetPseudoRandomValue() returns a
  value in [0,1) so the result lies in [0,columns-1] -- TODO confirm.
*/
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}
/*
  Pseudo-random row index; assumes GetPseudoRandomValue() returns a value
  in [0,1) so the result lies in [0,rows-1] -- TODO confirm.
*/
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}
/*
  Floored division of offset by extent: unlike C's truncating '/' and '%',
  the remainder is always in [0,extent-1], which tiling methods rely on
  for negative offsets.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    result;
  result.quotient=offset/((ssize_t) extent);
  result.remainder=offset % ((ssize_t) extent);
  /* Correct toward negative infinity when signs differ. */
  if ((result.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      result.quotient--;
      result.remainder+=((ssize_t) extent);
    }
  return(result);
}
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    offset;
  MagickSizeType
    length,
    number_pixels;
  NexusInfo
    *magick_restrict virtual_nexus;
  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];
  register const Quantum
    *magick_restrict p;
  register const void
    *magick_restrict r;
  register Quantum
    *magick_restrict q;
  register ssize_t
    i,
    u;
  register unsigned char
    *magick_restrict s;
  ssize_t
    v;
  void
    *magick_restrict virtual_metacontent;
  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Map the requested region onto the nexus; clone rather than point into
    the cache when the image carries a write/composite mask.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /*
    Fast path: the request lies entirely inside the cache extents, so read
    it directly (no virtual pixels needed).
  */
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;
        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  /*
    For the constant-valued methods, precompute the single virtual pixel
    (and an all-zero metacontent buffer) used for every out-of-bounds
    position.
  */
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          /* Opaque black: all channels zero, alpha opaque. */
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          /* Opaque mid-gray: all channels at half range. */
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          /* Fully transparent black. */
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          /* Opaque white: all channels at full range. */
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /* Remaining methods use the image background color. */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Walk the requested region row by row.  Within a row, in-bounds spans
    are transferred as runs of `length` pixels; each out-of-bounds position
    is resolved to a single substitute pixel per the virtual-pixel method.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;
    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;
      x_offset=x+u;
      /* Longest in-bounds run starting at x_offset (may be <= 0). */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;
          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              /* Replicate the nearest edge pixel (recursive 1x1 fetch). */
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Lazily create the RNG on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              /* Deterministic dithered displacement via DitherMatrix. */
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              /* Wrap both axes (floored modulo handles negatives). */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Wrap, reflecting every other tile on each axis. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              /* Wrap horizontally, clamp vertically. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              /* Wrap vertically, clamp horizontally. */
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant pixel precomputed above. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              /* Alternate tiles: constant pixel on "odd" tiles. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              /* Tile horizontally; constant pixel above/below the image. */
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              /* Tile vertically; constant pixel left/right of the image. */
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          /* Copy one pixel (length == 1 here) and its metacontent. */
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            NOTE(review): only `length` bytes are copied here although `s`
            advances by length*metacontent_extent (and the single-pixel path
            copies metacontent_extent bytes per pixel) -- this looks like it
            should copy length*metacontent_extent bytes; confirm upstream.
          */
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    /* An early break above signals a failed fetch; propagate it. */
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Default virtual-pixel handler: satisfy the region request through this
  thread's private cache nexus.
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels corresponding with the
%  last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the pixels of the most recent virtual/queued request, delegating
  to the registered handler when one is installed and otherwise reading
  this thread's cache nexus directly.
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler ==
      (GetVirtualPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
    }
  return(cache_info->methods.get_virtual_pixels_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return an immutable view of the requested pixel region, delegating to the
  registered virtual-pixel handler when one is installed and otherwise
  reading through this thread's cache nexus.  The returned pointer must
  never be freed by the caller.
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixel_handler ==
      (GetVirtualPixelHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),
        x,y,columns,rows,cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.get_virtual_pixel_handler(image,
    GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   G e t   V i r t u a l   P i x e l s   C a c h e                           %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels corresponding with the last
%  call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Hand back the pixel staging area of the given cache nexus, or NULL when
    the cache has not been allocated yet (storage class is undefined).
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((const Quantum *) NULL);
  return((const Quantum *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;

  /*
    Blend source channel p over destination channel q weighted by the two
    composite-mask alphas; a fully opaque mask leaves the source untouched.
  */
  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  return(ClampToQuantum(PerceptibleReciprocal(gamma)*
    MagickOver_((double) p,alpha,(double) q,beta)));
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply the composite mask: for each pixel of the nexus, blend the updated
    channels (q) with the authentic pixels (p) weighted by the pixel's
    composite-mask value.  Returns MagickTrue on success, MagickFalse when
    the authentic pixels cannot be acquired.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);  /* no composite mask -- nothing to do */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  /*
    Hoisted out of the loop: neither pointer can become NULL mid-iteration,
    so checking once up front replaces the per-pixel test (the original
    broke out of the loop and reported failure via the n < number_pixels
    comparison below -- same result, done once).
  */
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    mask_alpha=(double) GetPixelCompositeMask(image,p);
    for (i=0; i < (ssize_t) image->number_channels; i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      if ((traits & UpdatePixelTrait) == 0)
        continue;  /* channel not selected for update */
      q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
        GetPixelAlpha(image,q));
    }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Open (or create) the disk file backing the pixel cache in the requested
  MapMode.  Reuses an already-open descriptor when the mode matches;
  otherwise opens a new descriptor and closes the stale one.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue); /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* try exclusive create first; fall back to opening an existing file */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* same exclusive-create-then-reopen dance, but read/write */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info); /* release stale descriptor */
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
/*
  Write `length` bytes from `buffer` to the cache file at `offset`,
  retrying on EINTR and on short writes.  Returns the number of bytes
  written (less than `length` on unrecoverable error) or -1 when the
  initial seek fails (non-pwrite builds only).
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* without pwrite() we must position the shared file offset ourselves */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* real error: report bytes written so far */
      }
  }
  return(i);
}
/*
  Grow the on-disk pixel cache file so it can hold at least `length`
  bytes, then rewind the file offset to the start.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* reject lengths that do not round-trip through a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file is already large enough */
  else
    {
      /* write one byte at length-1 to (sparsely) extend the file */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* optionally force real block allocation to avoid later SIGBUS/ENOSPC */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Allocate and open the pixel cache for `image`, trying each backing store
  in turn: heap/anonymous memory, a distributed cache server, then a disk
  file (optionally memory-mapped).  Records geometry/channel layout in the
  cache and clones any pre-existing pixels when reopening for write.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /* keep a copy of the previous cache state so existing pixels can be
     cloned into the new store (and released) below */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  /*
    Propagate the image geometry and channel layout into the cache.
  */
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* overflow check: recomputing columns from length must give them back */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode: record geometry only, never allocate pixels */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always live on disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      /*
        Attempt 1: heap or anonymous-mapped memory cache.
      */
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* allocation failed: restore the previous pixel store */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  /* preserve existing pixels, then free the old store */
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  /*
    Attempt 2: distributed pixel cache (only when the disk resource is
    exhausted and remote cache hosts are registered).
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* reopening for write: discard the stale disk file and its name */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;  /* too large to address: plain disk I/O */
  else
    {
      /*
        Attempt 3: memory-map the disk file for direct pixel access.
      */
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                /* mmap failed: fall back to plain disk I/O */
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    cache_info->number_channels*number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                      MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                if (status == 0)
                  {
                    cache_info->type=UndefinedCache;
                    return(MagickFalse);
                  }
                return(MagickTrue);
              }
          }
    }
  /*
    Final fallback: plain disk cache (no mapping).
  */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to (reads) an existing
%      persistent pixel cache; zero initializes the persistent pixel cache
%      by cloning this image's pixels to it.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* flush any device-side pixels back to host memory before persisting */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance caller's offset to the next page-aligned slot in the file */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /* build a disk-backed clone descriptor that mirrors this cache's
     geometry and channel layout, then copy the pixels into it */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);  /* signed-overflow guard */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* advance to the offset of the region's last pixel and make sure the
     whole region fits inside the image */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a writable pixel region through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer a user-installed queue handler when one is registered;
    otherwise queue the region through this thread's cache nexus.
  */
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read `length` bytes at `offset` from the cache file into `buffer`,
  retrying on EINTR and on short reads.  Returns the number of bytes read
  (less than `length` on unrecoverable error) or -1 when the initial seek
  fails (non-pread builds only).
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* without pread() we must position the shared file offset ourselves */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* real error: report bytes read so far */
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register ssize_t
    y;
  register unsigned char
    *magick_restrict q;
  size_t
    rows;
  /*
    Read the metacontent covering the nexus region from the pixel cache
    backing store (memory, disk, or a distributed cache server) into
    nexus_info->metacontent.  Returns MagickFalse when the cache carries no
    metacontent or when a read fails.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus maps the cache directly; nothing to copy */
  /*
    offset is in pixels; length is the byte width of one nexus row; extent
    is the total byte count of the region.  NOTE(review): unlike
    ReadPixelCachePixels, these products are not overflow-checked here.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;
      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: collapse all rows into a single memcpy. */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the metacontent follows all pixel data, so reads are based
        past extent (total pixels) * channels * sizeof(Quantum).
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read; reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* fetch one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a read above failed. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register Quantum
    *magick_restrict q;
  register ssize_t
    y;
  size_t
    number_channels,
    rows;
  /*
    Read the pixels covering the nexus region from the pixel cache backing
    store (memory, disk, or a distributed cache server) into
    nexus_info->pixels.  Returns MagickFalse on arithmetic overflow or a
    failed read.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus maps the cache directly; nothing to copy */
  /*
    Compute the pixel offset, per-row byte length, and total extent; each
    product is verified by dividing back to reject overflow.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;
      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: collapse all rows into a single memcpy. */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read; reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* fetch one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a read above failed. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the pixel cache reference count under its semaphore and return
    the cache so callers can share ownership.
  */
  assert(cache != (Cache) NULL);  /* was (Cache *) NULL: wrong pointer level */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Synchronize the pixel cache channel count with the image's current
    pixel channel layout.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /*
    Reset the module-level anonymous-memory setting to zero.
  */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the module-level pixel cache epoch counter to zero.
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;
  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;
  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;
  /*
    Set cache pixel methods.  Each handler in cache_methods replaces the
    cache's current handler only when it is non-NULL, so callers may
    override any subset of the methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): this branch tests the CURRENT handler in cache_info rather
    than the incoming one in cache_methods -- the opposite of every branch
    above and of the authentic case below.  Confirm the asymmetry is
    intentional before changing it.
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate (or anonymously map) the staging buffer for a cache nexus.
    Returns MagickFalse, recording an exception, when the request cannot be
    satisfied.
  */
  if (((MagickSizeType) ((size_t) length)) != length)
    {
      /* Request does not fit in a size_t on this platform. */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      /* Anonymous memory map; flagged so release uses UnmapBlob. */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      /* Aligned heap allocation, zero-filled on success. */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;

  /*
    Hint the CPU to prefetch the next cache line of the nexus pixels (for
    read or write access depending on mode); regions smaller than one cache
    line are skipped.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    {
      /* rw/locality arguments kept as literal constants. */
      MagickCachePrefetch(address,0,1);
      return;
    }
  MagickCachePrefetch(address,1,1);
}
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  MagickSizeType
    length,
    number_pixels;
  /*
    Bind the nexus to a region of the pixel cache: either point it directly
    at cache memory (when the region is contiguous and unbuffered) or
    allocate a staging buffer that is later synced back to the cache.
    Returns the nexus pixels, or NULL and records an exception on failure.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is only valid when the region's rows are contiguous
        in the cache: full-width rows, or a single in-bounds partial row.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;
          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  /* Reuse the existing staging buffer when it is already large enough. */
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  /* Metacontent (if any) is placed immediately after the pixel data. */
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  CacheView
    *magick_restrict image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Set the alpha channel of every pixel to `alpha' and mark the image as
    carrying alpha (BlendPixelTrait).  Rows are processed in parallel when
    OpenMP is available; status collapses to MagickFalse if any row fails.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /* A prior row failure skips remaining rows (no early break in OpenMP). */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  VirtualPixelMethod
    method;
  /*
    Record the new virtual-pixel method on the cache and return the previous
    setting.  Some methods need the image prepared first: background and
    transparent virtual pixels require an alpha channel, and a non-gray
    background forces the image out of a gray colorspace.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Synchronize pending OpenCL work back into host memory; only applies to
    a memory cache with an associated OpenCL cache info.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type != MemoryCache)
    return;
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /*
    Flush any outstanding OpenCL operations for the image's pixel cache and
    update host memory.
  */
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickBooleanType
    status;
  /*
    Transfer pixels to the cache.  Applies write/composite masks first,
    taints the image, and writes pixels (and metacontent, when present)
    back to the cache unless the nexus already aliases cache memory.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (image->mask_trait != UpdatePixelTrait)
    {
      /* Clip/composite the nexus against the image's mask channels. */
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* Nexus points directly at cache memory: nothing to write back. */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  /* Pixel write status is combined with the metacontent write below. */
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Sync the calling thread's cache nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Save pixels through the registered sync handler when one is installed;
    otherwise sync the calling thread's cache nexus directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Save the image pixels to the in-memory or disk cache; success means
    GetImagePixelCache produced a valid cache.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Nothing to write if the image carries no metacontent; if the nexus
    already points directly into the authentic cache, it is in sync.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    offset is in pixels from the cache origin; length is one nexus row of
    metacontent in bytes, extent the whole nexus region in bytes.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  A full-width region that fits
        in a size_t is copied as one contiguous run.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        Metacontent is stored after all pixel channels in the cache file;
        extent is repurposed here as the pixel-area skip in pixels.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, one region row at a time
        unless the whole extent fits in a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed mojibake: "&region" had been corrupted to "(R)ion" */
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    An early break above leaves y short of rows, signalling a failed write.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   W r i t e P i x e l C a c h e P i x e l s                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    A nexus that aliases the authentic cache needs no explicit write-back.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    offset is in pixels from the cache origin; length is one nexus row of
    pixel data in bytes, extent the whole nexus region in bytes.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  A full-width region that fits in a size_t
        is copied as one contiguous run.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one region row at a time unless
        the whole extent fits in a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed mojibake: "&region" had been corrupted to "(R)ion" */
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    An early break above leaves y short of rows, signalling a failed write.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
phylokernel.h | /*
* phylokernel.h
*
* Created on: Dec 14, 2014
* Author: minh
*/
#ifndef PHYLOKERNEL_H_
#define PHYLOKERNEL_H_
#include "phylotree.h"
//#include "vectorclass/vectorclass.h"
//#include "vectorclass/vectormath_exp.h"
#include "alignment/superalignment.h"
#ifdef __SSE2__
// Pairwise horizontal sum of two 2-lane vectors: returns
// {x[0][0]+x[0][1], x[1][0]+x[1][1]}.
inline Vec2d horizontal_add(Vec2d x[2]) {
#if INSTRSET >= 3 // SSE3
// hadd performs both pairwise sums in a single instruction
return _mm_hadd_pd(x[0],x[1]);
#elif INSTRSET >= 2
// SSE2 fallback: gather the low lanes and the high lanes, then add
Vec2d help0 = _mm_shuffle_pd(x[0], x[1], _MM_SHUFFLE2(0,0));
Vec2d help1 = _mm_shuffle_pd(x[0], x[1], _MM_SHUFFLE2(1,1));
return _mm_add_pd(help0, help1);
#else
#error "You must compile with SSE2 enabled!"
#endif
}
// Largest of the two lanes of a.
inline double horizontal_max(Vec2d const &a) {
double lanes[2];
a.store(lanes);
return max(lanes[0], lanes[1]);
}
#endif
#ifdef __AVX__
// Horizontal sum of four 4-lane vectors: lane i of the result is the sum
// of all four lanes of x[i] (a=x[0], b=x[1], c=x[2], d=x[3] below).
inline Vec4d horizontal_add(Vec4d x[4]) {
// {a[0]+a[1], b[0]+b[1], a[2]+a[3], b[2]+b[3]}
__m256d sumab = _mm256_hadd_pd(x[0], x[1]);
// {c[0]+c[1], d[0]+d[1], c[2]+c[3], d[2]+d[3]}
__m256d sumcd = _mm256_hadd_pd(x[2], x[3]);
// {a[0]+a[1], b[0]+b[1], c[2]+c[3], d[2]+d[3]}
__m256d blend = _mm256_blend_pd(sumab, sumcd, 12/* 0b1100*/);
// {a[2]+a[3], b[2]+b[3], c[0]+c[1], d[0]+d[1]}
__m256d perm = _mm256_permute2f128_pd(sumab, sumcd, 0x21);
// adding blend and perm completes each of the four 4-lane sums
return _mm256_add_pd(perm, blend);
}
// Largest of the four lanes of a.
inline double horizontal_max(Vec4d const &a) {
// reduce 4 lanes to 2 by taking the elementwise max of both halves
__m128d upper = _mm256_extractf128_pd(a,1);
__m128d lanemax = _mm_max_pd(_mm256_castpd256_pd128(a), upper);
double lanes[2];
_mm_storeu_pd(lanes, lanemax);
return max(lanes[0], lanes[1]);
}
#endif // __AVX__
// Dot product of x and y computed with aligned SIMD loads.
// NOTE(review): looks like size must be a positive multiple of
// VectorClass::size() and x/y must satisfy load_a's alignment — confirm
// at call sites.
template <class Numeric, class VectorClass>
Numeric PhyloTree::dotProductSIMD(Numeric *x, Numeric *y, int size) {
    VectorClass accum = VectorClass().load_a(x) * VectorClass().load_a(y);
    for (int pos = VectorClass::size(); pos < size; pos += VectorClass::size())
        accum = mul_add(VectorClass().load_a(&x[pos]), VectorClass().load_a(&y[pos]), accum);
    return horizontal_add(accum);
}
/************************************************************************************************
*
* Highly optimized vectorized versions of likelihood functions
*
*************************************************************************************************/
/*
template <class VectorClass, const int VCSIZE, const int nstates>
void PhyloTree::computePartialLikelihoodEigenSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad) {
// don't recompute the likelihood
assert(dad);
if (dad_branch->partial_lh_computed & 1)
return;
dad_branch->partial_lh_computed |= 1;
num_partial_lh_computations++;
size_t nptn = aln->size() + model_factory->unobserved_ptns.size();
PhyloNode *node = (PhyloNode*)(dad_branch->node);
if (!tip_partial_lh_computed)
computeTipPartialLikelihood();
if (node->isLeaf()) {
dad_branch->lh_scale_factor = 0.0;
//memset(dad_branch->scale_num, 0, nptn * sizeof(UBYTE));
return;
}
size_t ptn, c;
size_t orig_nptn = aln->size();
size_t ncat = site_rate->getNRate();
size_t ncat_mix = (model_factory->fused_mix_rate) ? ncat : ncat*model->getNMixtures();
assert(nstates == aln->num_states && nstates >= VCSIZE && VCSIZE == VectorClass().size());
assert(model->isReversible()); // only works with reversible model!
const size_t nstatesqr=nstates*nstates;
size_t i, x, j;
size_t block = nstates * ncat_mix;
size_t tip_block = nstates * model->getNMixtures();
size_t mix_addr_nstates[ncat_mix], mix_addr[ncat_mix];
size_t denom = (model_factory->fused_mix_rate) ? 1 : ncat;
for (c = 0; c < ncat_mix; c++) {
size_t m = c/denom;
mix_addr_nstates[c] = m*nstates;
mix_addr[c] = m*nstatesqr;
}
// internal node
dad_branch->lh_scale_factor = 0.0;
PhyloNeighbor *left = NULL, *right = NULL; // left & right are two neighbors leading to 2 subtrees
int num_leaves = 0;
FOR_NEIGHBOR_IT(node, dad, it) {
PhyloNeighbor *nei = (PhyloNeighbor*)*it;
if (!left) left = (PhyloNeighbor*)(*it); else right = (PhyloNeighbor*)(*it);
if ((nei->partial_lh_computed & 1) == 0)
computePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(nei, node);
dad_branch->lh_scale_factor += nei->lh_scale_factor;
if ((*it)->node->isLeaf()) num_leaves++;
}
if (params->lh_mem_save == LM_PER_NODE && !dad_branch->partial_lh) {
// re-orient partial_lh
bool done = false;
FOR_NEIGHBOR_IT(node, dad, it2) {
PhyloNeighbor *backnei = ((PhyloNeighbor*)(*it2)->node->findNeighbor(node));
if (backnei->partial_lh) {
dad_branch->partial_lh = backnei->partial_lh;
dad_branch->scale_num = backnei->scale_num;
backnei->partial_lh = NULL;
backnei->scale_num = NULL;
backnei->partial_lh_computed &= ~1; // clear bit
done = true;
break;
}
}
assert(done && "partial_lh is not re-oriented");
}
double *evec = model->getEigenvectors();
double *inv_evec = model->getInverseEigenvectors();
assert(inv_evec && evec);
// for (i = 0; i < tip_block; i++) {
// for (x = 0; x < nstates/VCSIZE; x++)
// // inv_evec is not aligned!
// vc_inv_evec[i*nstates/VCSIZE+x].load_a(&inv_evec[i*nstates+x*VCSIZE]);
// }
double *eval = model->getEigenvalues();
VectorClass *echildren = aligned_alloc<VectorClass>(block*nstates/VCSIZE*(node->degree()-1));
double *partial_lh_leaves = NULL;
if (num_leaves > 0)
partial_lh_leaves = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block*num_leaves);
VectorClass *echild = echildren;
double *partial_lh_leaf = partial_lh_leaves;
FOR_NEIGHBOR_IT(node, dad, it) {
VectorClass expchild[nstates/VCSIZE];
PhyloNeighbor *child = (PhyloNeighbor*)*it;
VectorClass *echild_ptr = echild;
// precompute information buffer
for (c = 0; c < ncat_mix; c++) {
VectorClass len_child = site_rate->getRate(c%ncat) * child->length;
double *eval_ptr = eval + mix_addr_nstates[c];
double *evec_ptr = evec + mix_addr[c];
for (i = 0; i < nstates/VCSIZE; i++) {
// eval is not aligned!
expchild[i] = exp(VectorClass().load_a(&eval_ptr[i*VCSIZE]) * len_child);
}
for (x = 0; x < nstates; x++) {
for (i = 0; i < nstates/VCSIZE; i++) {
// evec is not aligned!
echild_ptr[i] = (VectorClass().load_a(&evec_ptr[x*nstates+i*VCSIZE]) * expchild[i]);
}
echild_ptr += nstates/VCSIZE;
}
}
// pre compute information for tip
if (child->node->isLeaf()) {
vector<int>::iterator it;
for (it = aln->seq_states[child->node->id].begin(); it != aln->seq_states[child->node->id].end(); it++) {
int state = (*it);
double *this_partial_lh_leaf = partial_lh_leaf + state*block;
VectorClass *echild_ptr = echild;
for (c = 0; c < ncat_mix; c++) {
VectorClass *this_tip_partial_lh = (VectorClass*)(tip_partial_lh + state*tip_block + mix_addr_nstates[c]);
for (x = 0; x < nstates; x++) {
VectorClass vchild = 0.0;
for (i = 0; i < nstates/VCSIZE; i++) {
vchild += echild_ptr[i] * this_tip_partial_lh[i];
}
this_partial_lh_leaf[x] = horizontal_add(vchild);
echild_ptr += nstates/VCSIZE;
}
this_partial_lh_leaf += nstates;
}
}
size_t addr = aln->STATE_UNKNOWN * block;
for (x = 0; x < block; x++) {
partial_lh_leaf[addr+x] = 1.0;
}
partial_lh_leaf += (aln->STATE_UNKNOWN+1)*block;
}
echild += block*nstates/VCSIZE;
}
VectorClass *eleft = echildren, *eright = echildren + block*nstates/VCSIZE;
if (!left->node->isLeaf() && right->node->isLeaf()) {
PhyloNeighbor *tmp = left;
left = right;
right = tmp;
VectorClass *etmp = eleft;
eleft = eright;
eright = etmp;
}
if (node->degree() > 3) {
//--------------------- multifurcating node ------------------//
double sum_scale = 0.0;
// now for-loop computing partial_lh over all site-patterns
#ifdef _OPENMP
#pragma omp parallel for reduction(+: sum_scale) private(ptn, c, x, i) schedule(static)
#endif
for (ptn = 0; ptn < nptn; ptn++) {
double partial_lh_all[block];
for (i = 0; i < block; i++)
partial_lh_all[i] = 1.0;
dad_branch->scale_num[ptn] = 0;
double *partial_lh_leaf = partial_lh_leaves;
double *echild = (double*)echildren;
FOR_NEIGHBOR_IT(node, dad, it) {
PhyloNeighbor *child = (PhyloNeighbor*)*it;
if (child->node->isLeaf()) {
// external node
int state_child = (ptn < orig_nptn) ? (aln->at(ptn))[child->node->id] : model_factory->unobserved_ptns[ptn-orig_nptn];
double *child_lh = partial_lh_leaf + state_child*block;
for (c = 0; c < block; c++) {
// compute real partial likelihood vector
partial_lh_all[c] *= child_lh[c];
}
partial_lh_leaf += (aln->STATE_UNKNOWN+1)*block;
} else {
// internal node
double *partial_lh = partial_lh_all;
double *partial_lh_child = child->partial_lh + ptn*block;
dad_branch->scale_num[ptn] += child->scale_num[ptn];
double *echild_ptr = echild;
for (c = 0; c < ncat_mix; c++) {
// compute real partial likelihood vector
for (x = 0; x < nstates; x++) {
double vchild = 0.0;
// double *echild_ptr = echild + (c*nstatesqr+x*nstates);
for (i = 0; i < nstates; i++) {
vchild += echild_ptr[i] * partial_lh_child[i];
}
echild_ptr += nstates;
partial_lh[x] *= vchild;
}
partial_lh += nstates;
partial_lh_child += nstates;
}
} // if
echild += block*nstates;
} // FOR_NEIGHBOR
// compute dot-product with inv_eigenvector
double lh_max = 0.0;
double *partial_lh_tmp = partial_lh_all;
double *partial_lh = dad_branch->partial_lh + ptn*block;
for (c = 0; c < ncat_mix; c++) {
double *inv_evec_ptr = inv_evec + mix_addr[c];
for (i = 0; i < nstates; i++) {
double res = 0.0;
for (x = 0; x < nstates; x++) {
res += partial_lh_tmp[x]*inv_evec_ptr[x];
}
inv_evec_ptr += nstates;
partial_lh[i] = res;
lh_max = max(lh_max, fabs(res));
}
partial_lh += nstates;
partial_lh_tmp += nstates;
}
// check if one should scale partial likelihoods
if (lh_max < SCALING_THRESHOLD) {
partial_lh = dad_branch->partial_lh + ptn*block;
if (lh_max == 0.0) {
// for very shitty data
for (c = 0; c < ncat_mix; c++)
memcpy(&partial_lh[c*nstates], &tip_partial_lh[aln->STATE_UNKNOWN*nstates], nstates*sizeof(double));
sum_scale += LOG_SCALING_THRESHOLD* 4 * ptn_freq[ptn];
//sum_scale += log(lh_max) * ptn_freq[ptn];
dad_branch->scale_num[ptn] += 4;
int nsite = aln->getNSite();
for (i = 0, x = 0; i < nsite && x < ptn_freq[ptn]; i++)
if (aln->getPatternID(i) == ptn) {
outWarning((string)"Numerical underflow for site " + convertIntToString(i+1));
x++;
}
} else if (ptn_invar[ptn] == 0.0) {
// now do the likelihood scaling
for (i = 0; i < block; i++) {
partial_lh[i] *= SCALING_THRESHOLD_INVER;
//partial_lh[i] /= lh_max;
}
// unobserved const pattern will never have underflow
sum_scale += LOG_SCALING_THRESHOLD * ptn_freq[ptn];
//sum_scale += log(lh_max) * ptn_freq[ptn];
dad_branch->scale_num[ptn] += 1;
}
}
} // for ptn
dad_branch->lh_scale_factor += sum_scale;
// end multifurcating treatment
} else if (left->node->isLeaf() && right->node->isLeaf()) {
// special treatment for TIP-TIP (cherry) case
// pre compute information for both tips
double *partial_lh_left = partial_lh_leaves;
double *partial_lh_right = partial_lh_leaves + (aln->STATE_UNKNOWN+1)*block;
// assign pointers for left and right partial_lh
// double **lh_left_ptr = aligned_alloc<double*>(nptn);
// double **lh_right_ptr = aligned_alloc<double*>(nptn);
// for (ptn = 0; ptn < orig_ntn; ptn++) {
// lh_left_ptr[ptn] = &partial_lh_left[block * (aln->at(ptn))[left->node->id]];
// lh_right_ptr[ptn] = &partial_lh_right[block * (aln->at(ptn))[right->node->id]];
// }
// for (ptn = orig_ntn; ptn < nptn; ptn++) {
// lh_left_ptr[ptn] = &partial_lh_left[block * model_factory->unobserved_ptns[ptn-orig_ntn]];
// lh_right_ptr[ptn] = &partial_lh_right[block * model_factory->unobserved_ptns[ptn-orig_ntn]];
// }
// scale number must be ZERO
memset(dad_branch->scale_num, 0, nptn * sizeof(UBYTE));
VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
VectorClass res[VCSIZE];
#ifdef _OPENMP
#pragma omp parallel for private(ptn, c, x, i, j, vc_partial_lh_tmp, res)
#endif
for (ptn = 0; ptn < nptn; ptn++) {
double *partial_lh = dad_branch->partial_lh + ptn*block;
double *lh_left;
double *lh_right;
if (ptn < orig_nptn) {
lh_left = &partial_lh_left[block * (aln->at(ptn))[left->node->id]];
lh_right = &partial_lh_right[block * (aln->at(ptn))[right->node->id]];
} else {
lh_left = &partial_lh_left[block * model_factory->unobserved_ptns[ptn-orig_nptn]];
lh_right = &partial_lh_right[block * model_factory->unobserved_ptns[ptn-orig_nptn]];
}
for (c = 0; c < ncat_mix; c++) {
VectorClass *vc_inv_evec_ptr = (VectorClass*)(inv_evec + mix_addr[c]);
// compute real partial likelihood vector
for (x = 0; x < nstates/VCSIZE; x++) {
vc_partial_lh_tmp[x] = (VectorClass().load_a(&lh_left[x*VCSIZE]) * VectorClass().load_a(&lh_right[x*VCSIZE]));
}
// compute dot-product with inv_eigenvector
for (i = 0; i < nstates; i+=VCSIZE) {
for (j = 0; j < VCSIZE; j++) {
res[j] = vc_partial_lh_tmp[0] * vc_inv_evec_ptr[(i+j)*nstates/VCSIZE];
}
for (x = 1; x < nstates/VCSIZE; x++)
for (j = 0; j < VCSIZE; j++) {
res[j] = mul_add(vc_partial_lh_tmp[x], vc_inv_evec_ptr[(i+j)*nstates/VCSIZE+x], res[j]);
}
horizontal_add(res).store_a(&partial_lh[i]);
}
lh_left += nstates;
lh_right += nstates;
partial_lh += nstates;
}
}
//aligned_free(lh_right_ptr);
//aligned_free(lh_left_ptr);
} else if (left->node->isLeaf() && !right->node->isLeaf()) {
// special treatment to TIP-INTERNAL NODE case
// only take scale_num from the right subtree
memcpy(dad_branch->scale_num, right->scale_num, nptn * sizeof(UBYTE));
// pre compute information for left tip
double *partial_lh_left = partial_lh_leaves;
// assign pointers for partial_lh_left
// double **lh_left_ptr = aligned_alloc<double*>(nptn);
// for (ptn = 0; ptn < orig_ntn; ptn++) {
// lh_left_ptr[ptn] = &partial_lh_left[block * (aln->at(ptn))[left->node->id]];
// }
// for (ptn = orig_ntn; ptn < nptn; ptn++) {
// lh_left_ptr[ptn] = &partial_lh_left[block * model_factory->unobserved_ptns[ptn-orig_ntn]];
// }
double sum_scale = 0.0;
VectorClass vc_lh_right[nstates/VCSIZE];
VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
VectorClass res[VCSIZE];
VectorClass vc_max; // maximum of partial likelihood, for scaling check
VectorClass vright[VCSIZE];
#ifdef _OPENMP
#pragma omp parallel for reduction(+: sum_scale) private (ptn, c, x, i, j, vc_lh_right, vc_partial_lh_tmp, res, vc_max, vright)
#endif
for (ptn = 0; ptn < nptn; ptn++) {
double *partial_lh = dad_branch->partial_lh + ptn*block;
double *partial_lh_right = right->partial_lh + ptn*block;
double *lh_left;
if (ptn < orig_nptn) {
lh_left = &partial_lh_left[block * (aln->at(ptn))[left->node->id]];
} else {
lh_left = &partial_lh_left[block * model_factory->unobserved_ptns[ptn-orig_nptn]];
}
vc_max = 0.0;
for (c = 0; c < ncat_mix; c++) {
VectorClass *vc_inv_evec_ptr = (VectorClass*)(inv_evec + mix_addr[c]);
// compute real partial likelihood vector
for (i = 0; i < nstates/VCSIZE; i++)
vc_lh_right[i].load_a(&partial_lh_right[i*VCSIZE]);
for (x = 0; x < nstates/VCSIZE; x++) {
size_t addr = c*nstatesqr/VCSIZE+x*nstates;
for (j = 0; j < VCSIZE; j++) {
vright[j] = eright[addr+nstates*j/VCSIZE] * vc_lh_right[0];
}
for (i = 1; i < nstates/VCSIZE; i++)
for (j = 0; j < VCSIZE; j++) {
vright[j] = mul_add(eright[addr+i+nstates*j/VCSIZE], vc_lh_right[i], vright[j]);
}
vc_partial_lh_tmp[x] = VectorClass().load_a(&lh_left[x*VCSIZE])
* horizontal_add(vright);
}
// compute dot-product with inv_eigenvector
for (i = 0; i < nstates; i+=VCSIZE) {
for (j = 0; j < VCSIZE; j++) {
res[j] = vc_partial_lh_tmp[0] * vc_inv_evec_ptr[(i+j)*nstates/VCSIZE];
}
for (x = 1; x < nstates/VCSIZE; x++) {
for (j = 0; j < VCSIZE; j++) {
res[j] = mul_add(vc_partial_lh_tmp[x], vc_inv_evec_ptr[(i+j)*nstates/VCSIZE+x], res[j]);
}
}
VectorClass sum_res = horizontal_add(res);
sum_res.store_a(&partial_lh[i]);
vc_max = max(vc_max, abs(sum_res)); // take the maximum for scaling check
}
lh_left += nstates;
partial_lh_right += nstates;
partial_lh += nstates;
}
// check if one should scale partial likelihoods
double lh_max = horizontal_max(vc_max);
if (lh_max < SCALING_THRESHOLD && ptn_invar[ptn] == 0.0) {
// now do the likelihood scaling
partial_lh -= block; // revert its pointer
VectorClass scale_thres(SCALING_THRESHOLD_INVER);
for (i = 0; i < block; i+=VCSIZE) {
(VectorClass().load_a(&partial_lh[i]) * scale_thres).store_a(&partial_lh[i]);
}
// unobserved const pattern will never have underflow
sum_scale += LOG_SCALING_THRESHOLD * ptn_freq[ptn];
dad_branch->scale_num[ptn] += 1;
partial_lh += block; // increase the pointer again
}
}
dad_branch->lh_scale_factor += sum_scale;
//aligned_free(lh_left_ptr);
} else {
// both left and right are internal node
double sum_scale = 0.0;
VectorClass vc_max; // maximum of partial likelihood, for scaling check
VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
VectorClass vc_lh_left[nstates/VCSIZE], vc_lh_right[nstates/VCSIZE];
VectorClass res[VCSIZE];
VectorClass vleft[VCSIZE], vright[VCSIZE];
#ifdef _OPENMP
#pragma omp parallel for reduction (+: sum_scale) private(ptn, c, x, i, j, vc_max, vc_partial_lh_tmp, vc_lh_left, vc_lh_right, res, vleft, vright)
#endif
for (ptn = 0; ptn < nptn; ptn++) {
double *partial_lh = dad_branch->partial_lh + ptn*block;
double *partial_lh_left = left->partial_lh + ptn*block;
double *partial_lh_right = right->partial_lh + ptn*block;
dad_branch->scale_num[ptn] = left->scale_num[ptn] + right->scale_num[ptn];
vc_max = 0.0;
for (c = 0; c < ncat_mix; c++) {
VectorClass *vc_inv_evec_ptr = (VectorClass*)(inv_evec + mix_addr[c]);
// compute real partial likelihood vector
for (i = 0; i < nstates/VCSIZE; i++) {
vc_lh_left[i].load_a(&partial_lh_left[i*VCSIZE]);
vc_lh_right[i].load_a(&partial_lh_right[i*VCSIZE]);
}
for (x = 0; x < nstates/VCSIZE; x++) {
size_t addr = c*nstatesqr/VCSIZE+x*nstates;
for (j = 0; j < VCSIZE; j++) {
size_t addr_com = addr+j*nstates/VCSIZE;
vleft[j] = eleft[addr_com] * vc_lh_left[0];
vright[j] = eright[addr_com] * vc_lh_right[0];
}
for (i = 1; i < nstates/VCSIZE; i++) {
for (j = 0; j < VCSIZE; j++) {
size_t addr_com = addr+i+j*nstates/VCSIZE;
vleft[j] = mul_add(eleft[addr_com], vc_lh_left[i], vleft[j]);
vright[j] = mul_add(eright[addr_com], vc_lh_right[i], vright[j]);
}
}
vc_partial_lh_tmp[x] = horizontal_add(vleft) * horizontal_add(vright);
}
// compute dot-product with inv_eigenvector
for (i = 0; i < nstates; i+=VCSIZE) {
for (j = 0; j < VCSIZE; j++) {
res[j] = vc_partial_lh_tmp[0] * vc_inv_evec_ptr[(i+j)*nstates/VCSIZE];
}
for (x = 1; x < nstates/VCSIZE; x++)
for (j = 0; j < VCSIZE; j++)
res[j] = mul_add(vc_partial_lh_tmp[x], vc_inv_evec_ptr[(i+j)*nstates/VCSIZE+x], res[j]);
VectorClass sum_res = horizontal_add(res);
sum_res.store_a(&partial_lh[i]);
vc_max = max(vc_max, abs(sum_res)); // take the maximum for scaling check
}
partial_lh += nstates;
partial_lh_left += nstates;
partial_lh_right += nstates;
}
// check if one should scale partial likelihoods
double lh_max = horizontal_max(vc_max);
if (lh_max < SCALING_THRESHOLD && ptn_invar[ptn] == 0.0) {
// now do the likelihood scaling
partial_lh -= block; // revert its pointer
VectorClass scale_thres(SCALING_THRESHOLD_INVER);
for (i = 0; i < block; i+=VCSIZE) {
(VectorClass().load_a(&partial_lh[i]) * scale_thres).store_a(&partial_lh[i]);
}
// unobserved const pattern will never have underflow
sum_scale += LOG_SCALING_THRESHOLD * ptn_freq[ptn];
dad_branch->scale_num[ptn] += 1;
partial_lh += block; // increase the pointer again
}
}
dad_branch->lh_scale_factor += sum_scale;
}
if (partial_lh_leaves)
aligned_free(partial_lh_leaves);
aligned_free(echildren);
}
template <class VectorClass, const int VCSIZE, const int nstates>
void PhyloTree::computeLikelihoodDervEigenSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad, double &df, double &ddf) {
PhyloNode *node = (PhyloNode*) dad_branch->node;
PhyloNeighbor *node_branch = (PhyloNeighbor*) node->findNeighbor(dad);
if (!central_partial_lh)
initializeAllPartialLh();
if (node->isLeaf()) {
PhyloNode *tmp_node = dad;
dad = node;
node = tmp_node;
PhyloNeighbor *tmp_nei = dad_branch;
dad_branch = node_branch;
node_branch = tmp_nei;
}
if ((dad_branch->partial_lh_computed & 1) == 0)
computePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(dad_branch, dad);
if ((node_branch->partial_lh_computed & 1) == 0)
computePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(node_branch, node);
df = ddf = 0.0;
size_t ncat = site_rate->getNRate();
size_t ncat_mix = (model_factory->fused_mix_rate) ? ncat : ncat*model->getNMixtures();
size_t block = ncat_mix * nstates;
size_t tip_block = nstates * model->getNMixtures();
size_t ptn; // for big data size > 4GB memory required
size_t c, i, j;
size_t orig_nptn = aln->size();
size_t nptn = aln->size()+model_factory->unobserved_ptns.size();
size_t maxptn = ((nptn+VCSIZE-1)/VCSIZE)*VCSIZE;
maxptn = max(maxptn, aln->size()+((model_factory->unobserved_ptns.size()+VCSIZE-1)/VCSIZE)*VCSIZE);
size_t mix_addr_nstates[ncat_mix];
size_t denom = (model_factory->fused_mix_rate) ? 1 : ncat;
double *eval = model->getEigenvalues();
assert(eval);
VectorClass *vc_val0 = (VectorClass*)aligned_alloc<double>(block);
VectorClass *vc_val1 = (VectorClass*)aligned_alloc<double>(block);
VectorClass *vc_val2 = (VectorClass*)aligned_alloc<double>(block);
VectorClass vc_len = dad_branch->length;
for (c = 0; c < ncat_mix; c++) {
size_t m = c/denom;
mix_addr_nstates[c] = m*nstates;
size_t mycat = c%ncat;
double *eval_ptr = eval + m*nstates;
VectorClass vc_rate = site_rate->getRate(mycat);
VectorClass vc_prop = site_rate->getProp(mycat) * model->getMixtureWeight(m);
for (i = 0; i < nstates/VCSIZE; i++) {
VectorClass cof = VectorClass().load_a(&eval_ptr[i*VCSIZE]) * vc_rate;
VectorClass val = exp(cof*vc_len) * vc_prop;
VectorClass val1_ = cof*val;
vc_val0[c*nstates/VCSIZE+i] = val;
vc_val1[c*nstates/VCSIZE+i] = val1_;
vc_val2[c*nstates/VCSIZE+i] = cof*val1_;
}
}
assert(theta_all);
if (!theta_computed) {
theta_computed = true;
// precompute theta for fast branch length optimization
if (dad->isLeaf()) {
// special treatment for TIP-INTERNAL NODE case
#ifdef _OPENMP
#pragma omp parallel for private(ptn, i, c)
#endif
for (ptn = 0; ptn < nptn; ptn++) {
double *partial_lh_dad = dad_branch->partial_lh + ptn*block;
double *theta = theta_all + ptn*block;
double *this_tip_partial_lh = tip_partial_lh + tip_block*((ptn < orig_nptn) ? (aln->at(ptn))[dad->id] : model_factory->unobserved_ptns[ptn-orig_nptn]);
for (c = 0; c < ncat_mix; c++) {
double *lh_dad = this_tip_partial_lh + mix_addr_nstates[c];
for (i = 0; i < nstates; i+=VCSIZE) {
(VectorClass().load_a(&lh_dad[i]) * VectorClass().load_a(&partial_lh_dad[i])).store_a(&theta[i]);
}
partial_lh_dad += nstates;
theta += nstates;
}
}
} else {
// both dad and node are internal nodes
double *partial_lh_node = node_branch->partial_lh;
double *partial_lh_dad = dad_branch->partial_lh;
size_t all_entries = nptn*block;
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
for (i = 0; i < all_entries; i+=VCSIZE) {
(VectorClass().load_a(&partial_lh_node[i]) * VectorClass().load_a(&partial_lh_dad[i]))
.store_a(&theta_all[i]);
}
}
if (nptn < maxptn) {
// copy dummy values
for (ptn = nptn; ptn < maxptn; ptn++)
memcpy(&theta_all[ptn*block], theta_all, block*sizeof(double));
}
}
VectorClass vc_ptn[VCSIZE], vc_df[VCSIZE], vc_ddf[VCSIZE], vc_theta[VCSIZE];
VectorClass vc_unit = 1.0;
VectorClass vc_freq;
VectorClass df_final = 0.0, ddf_final = 0.0;
// these stores values of 2 consecutive patterns
VectorClass lh_ptn, df_ptn, ddf_ptn, inv_lh_ptn;
// perform 2 sites at the same time for SSE/AVX efficiency
#ifdef _OPENMP
#pragma omp parallel private (ptn, i, j, vc_freq, vc_ptn, vc_df, vc_ddf, vc_theta, inv_lh_ptn, lh_ptn, df_ptn, ddf_ptn)
{
VectorClass df_final_th = 0.0;
VectorClass ddf_final_th = 0.0;
#pragma omp for nowait
#endif
for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) {
double *theta = theta_all + ptn*block;
// initialization
for (i = 0; i < VCSIZE; i++) {
vc_theta[i].load_a(theta+i*block);
vc_ptn[i] = vc_val0[0] * vc_theta[i];
vc_df[i] = vc_val1[0] * vc_theta[i];
vc_ddf[i] = vc_val2[0] * vc_theta[i];
}
for (i = 1; i < block/VCSIZE; i++) {
for (j = 0; j < VCSIZE; j++) {
vc_theta[j].load_a(&theta[i*VCSIZE+j*block]);
vc_ptn[j] = mul_add(vc_theta[j], vc_val0[i], vc_ptn[j]);
vc_df[j] = mul_add(vc_theta[j], vc_val1[i], vc_df[j]);
vc_ddf[j] = mul_add(vc_theta[j], vc_val2[i], vc_ddf[j]);
}
}
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]);
inv_lh_ptn = vc_unit / abs(lh_ptn);
vc_freq.load_a(&ptn_freq[ptn]);
df_ptn = horizontal_add(vc_df) * inv_lh_ptn;
ddf_ptn = horizontal_add(vc_ddf) * inv_lh_ptn;
ddf_ptn = nmul_add(df_ptn, df_ptn, ddf_ptn);
#ifdef _OPENMP
df_final_th = mul_add(df_ptn, vc_freq, df_final_th);
ddf_final_th = mul_add(ddf_ptn, vc_freq, ddf_final_th);
#else
df_final = mul_add(df_ptn, vc_freq, df_final);
ddf_final = mul_add(ddf_ptn, vc_freq, ddf_final);
#endif
}
#ifdef _OPENMP
#pragma omp critical
{
df_final += df_final_th;
ddf_final += ddf_final_th;
}
}
#endif
df = horizontal_add(df_final);
ddf = horizontal_add(ddf_final);
if (isnan(df) || isinf(df)) {
df = 0.0;
ddf = 0.0;
// outWarning("Numerical instability (some site-likelihood = 0)");
}
// assert(isnormal(tree_lh));
if (orig_nptn < nptn) {
// ascertaiment bias correction
VectorClass lh_final = 0.0;
df_final = 0.0;
ddf_final = 0.0;
lh_ptn = 0.0;
df_ptn = 0.0;
ddf_ptn = 0.0;
double prob_const, df_const, ddf_const;
double *theta = &theta_all[orig_nptn*block];
for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
lh_final += lh_ptn;
df_final += df_ptn;
ddf_final += ddf_ptn;
// initialization
for (i = 0; i < VCSIZE; i++) {
vc_theta[i].load_a(theta+i*block);
vc_ptn[i] = vc_val0[0] * vc_theta[i];
vc_df[i] = vc_val1[0] * vc_theta[i];
vc_ddf[i] = vc_val2[0] * vc_theta[i];
}
for (i = 1; i < block/VCSIZE; i++) {
for (j = 0; j < VCSIZE; j++) {
vc_theta[j].load_a(&theta[i*VCSIZE+j*block]);
vc_ptn[j] = mul_add(vc_theta[j], vc_val0[i], vc_ptn[j]);
vc_df[j] = mul_add(vc_theta[j], vc_val1[i], vc_df[j]);
vc_ddf[j] = mul_add(vc_theta[j], vc_val2[i], vc_ddf[j]);
}
}
theta += block*VCSIZE;
// ptn_invar[ptn] is not aligned
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
df_ptn = horizontal_add(vc_df);
ddf_ptn = horizontal_add(vc_ddf);
}
switch ((nptn-orig_nptn) % VCSIZE) {
case 0:
prob_const = horizontal_add(lh_final+lh_ptn);
df_const = horizontal_add(df_final+df_ptn);
ddf_const = horizontal_add(ddf_final+ddf_ptn);
break;
case 1:
prob_const = horizontal_add(lh_final)+lh_ptn[0];
df_const = horizontal_add(df_final)+df_ptn[0];
ddf_const = horizontal_add(ddf_final)+ddf_ptn[0];
break;
case 2:
prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1];
df_const = horizontal_add(df_final)+df_ptn[0]+df_ptn[1];
ddf_const = horizontal_add(ddf_final)+ddf_ptn[0]+ddf_ptn[1];
break;
case 3:
prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2];
df_const = horizontal_add(df_final)+df_ptn[0]+df_ptn[1]+df_ptn[2];
ddf_const = horizontal_add(ddf_final)+ddf_ptn[0]+ddf_ptn[1]+ddf_ptn[2];
break;
default:
assert(0);
break;
}
prob_const = 1.0 - prob_const;
double df_frac = df_const / prob_const;
double ddf_frac = ddf_const / prob_const;
int nsites = aln->getNSite();
df += nsites * df_frac;
ddf += nsites *(ddf_frac + df_frac*df_frac);
}
assert(!isnan(df));
aligned_free(vc_val2);
aligned_free(vc_val1);
aligned_free(vc_val0);
}
template <class VectorClass, const int VCSIZE, const int nstates>
double PhyloTree::computeLikelihoodBranchEigenSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad) {
PhyloNode *node = (PhyloNode*) dad_branch->node;
PhyloNeighbor *node_branch = (PhyloNeighbor*) node->findNeighbor(dad);
if (!central_partial_lh)
initializeAllPartialLh();
if (node->isLeaf()) {
PhyloNode *tmp_node = dad;
dad = node;
node = tmp_node;
PhyloNeighbor *tmp_nei = dad_branch;
dad_branch = node_branch;
node_branch = tmp_nei;
}
if ((dad_branch->partial_lh_computed & 1) == 0)
computePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(dad_branch, dad);
if ((node_branch->partial_lh_computed & 1) == 0)
computePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(node_branch, node);
double tree_lh = node_branch->lh_scale_factor + dad_branch->lh_scale_factor;
size_t ncat = site_rate->getNRate();
size_t ncat_mix = (model_factory->fused_mix_rate) ? ncat : ncat*model->getNMixtures();
size_t denom = (model_factory->fused_mix_rate) ? 1 : ncat;
size_t mix_addr_nstates[ncat_mix];
size_t block = ncat_mix * nstates;
size_t tip_block = nstates * model->getNMixtures();
size_t ptn; // for big data size > 4GB memory required
size_t c, i, j;
size_t orig_nptn = aln->size();
size_t nptn = aln->size()+model_factory->unobserved_ptns.size();
size_t maxptn = ((nptn+VCSIZE-1)/VCSIZE)*VCSIZE;
maxptn = max(maxptn, aln->size()+((model_factory->unobserved_ptns.size()+VCSIZE-1)/VCSIZE)*VCSIZE);
double *eval = model->getEigenvalues();
assert(eval);
VectorClass *vc_val = (VectorClass*)aligned_alloc<double>(block);
for (c = 0; c < ncat_mix; c++) {
size_t mycat = c%ncat;
size_t m = c/denom;
mix_addr_nstates[c] = m*nstates;
double *eval_ptr = eval + mix_addr_nstates[c];
VectorClass vc_len(site_rate->getRate(mycat)*dad_branch->length);
VectorClass vc_prop(site_rate->getProp(c) * model->getMixtureWeight(m));
for (i = 0; i < nstates/VCSIZE; i++) {
// eval is not aligned!
vc_val[c*nstates/VCSIZE+i] = exp(VectorClass().load_a(&eval_ptr[i*VCSIZE]) * vc_len) * vc_prop;
}
}
double prob_const = 0.0;
if (dad->isLeaf()) {
// special treatment for TIP-INTERNAL NODE case
// precompute information from one tip
double *partial_lh_node = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block);
IntVector states_dad = aln->seq_states[dad->id];
states_dad.push_back(aln->STATE_UNKNOWN);
for (IntVector::iterator it = states_dad.begin(); it != states_dad.end(); it++) {
double *lh_node = partial_lh_node + (*it)*block;
double *lh_tip = tip_partial_lh + (*it)*tip_block;
VectorClass *vc_val_tmp = vc_val;
for (c = 0; c < ncat_mix; c++) {
double *this_lh_tip = lh_tip + mix_addr_nstates[c];
for (i = 0; i < nstates; i+=VCSIZE) {
(vc_val_tmp[i/VCSIZE] * VectorClass().load_a(&this_lh_tip[i])).store_a(&lh_node[i]);
}
lh_node += nstates;
vc_val_tmp += nstates/VCSIZE;
}
}
//VectorClass vc_tip_partial_lh[nstates];
//VectorClass vc_partial_lh_dad[VCSIZE]
VectorClass vc_ptn[VCSIZE];
VectorClass lh_final(0.0), vc_freq;
VectorClass lh_ptn; // store likelihoods of VCSIZE consecutive patterns
// double **lh_states_dad = aligned_alloc<double*>(maxptn);
// for (ptn = 0; ptn < orig_nptn; ptn++)
// lh_states_dad[ptn] = &tip_partial_lh[(aln->at(ptn))[dad->id] * tip_block];
// for (ptn = orig_nptn; ptn < nptn; ptn++)
// lh_states_dad[ptn] = &tip_partial_lh[model_factory->unobserved_ptns[ptn-orig_nptn] * tip_block];
// // initialize beyond #patterns for efficiency
// for (ptn = nptn; ptn < maxptn; ptn++)
// lh_states_dad[ptn] = &tip_partial_lh[aln->STATE_UNKNOWN * tip_block];
int *ptn_states_dad = aligned_alloc<int>(maxptn);
for (ptn = 0; ptn < orig_nptn; ptn++)
ptn_states_dad[ptn] = (aln->at(ptn))[dad->id];
for (ptn = orig_nptn; ptn < nptn; ptn++)
ptn_states_dad[ptn] = model_factory->unobserved_ptns[ptn-orig_nptn];
// initialize beyond #patterns for efficiency
for (ptn = nptn; ptn < maxptn; ptn++)
ptn_states_dad[ptn] = aln->STATE_UNKNOWN;
// copy dummy values because VectorClass will access beyond nptn
for (ptn = nptn; ptn < maxptn; ptn++)
memcpy(&dad_branch->partial_lh[ptn*block], dad_branch->partial_lh, block*sizeof(double));
#ifdef _OPENMP
#pragma omp parallel private(ptn, i, j, vc_ptn, vc_freq, lh_ptn)
{
VectorClass lh_final_th = 0.0;
#pragma omp for nowait
#endif
// main loop over all patterns with a step size of VCSIZE
for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) {
//double *partial_lh_dad = dad_branch->partial_lh + ptn*block;
for (j = 0; j < VCSIZE; j++) {
vc_ptn[j] = 0.0;
double *partial_lh_dad = dad_branch->partial_lh + (ptn+j)*block;
int state_dad = ptn_states_dad[ptn+j];
double *lh_node = &partial_lh_node[state_dad*block];
for (i = 0; i < block; i+=VCSIZE) {
vc_ptn[j] = mul_add(VectorClass().load_a(&lh_node[i]),
VectorClass().load_a(&partial_lh_dad[i]), vc_ptn[j]);
}
}
// initialize vc_tip_partial_lh
// for (j = 0; j < VCSIZE; j++) {
// double *lh_dad = lh_states_dad[ptn+j];
// for (i = 0; i < nstates/VCSIZE; i++) {
// vc_tip_partial_lh[j*(nstates/VCSIZE)+i].load_a(&lh_dad[i*VCSIZE]);
// }
// vc_partial_lh_dad[j].load_a(&partial_lh_dad[j*block]);
// vc_ptn[j] = vc_val[0] * vc_tip_partial_lh[j*(nstates/VCSIZE)] * vc_partial_lh_dad[j];
// }
//
// // compute vc_ptn
// for (i = 1; i < block/VCSIZE; i++)
// for (j = 0; j < VCSIZE; j++) {
// vc_partial_lh_dad[j].load_a(&partial_lh_dad[j*block+i*VCSIZE]);
// vc_ptn[j] = mul_add(vc_val[i] * vc_tip_partial_lh[j*(nstates/VCSIZE)+i%(nstates/VCSIZE)],
// vc_partial_lh_dad[j], vc_ptn[j]);
// }
vc_freq.load_a(&ptn_freq[ptn]);
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]);
lh_ptn = log(abs(lh_ptn));
lh_ptn.store_a(&_pattern_lh[ptn]);
// multiply with pattern frequency
#ifdef _OPENMP
lh_final_th = mul_add(lh_ptn, vc_freq, lh_final_th);
#else
lh_final = mul_add(lh_ptn, vc_freq, lh_final);
#endif
}
#ifdef _OPENMP
#pragma omp critical
{
lh_final += lh_final_th;
}
}
#endif
tree_lh += horizontal_add(lh_final);
if (isnan(tree_lh) || isinf(tree_lh)) {
cout << "WARNING: Numerical underflow caused by alignment sites there";
i = aln->getNSite();
for (j = 0; j < i; j++) {
ptn = aln->getPatternID(j);
if (isnan(_pattern_lh[ptn]) || isinf(_pattern_lh[ptn])) {
cout << " " << j+1;
}
}
tree_lh = node_branch->lh_scale_factor + dad_branch->lh_scale_factor;
for (ptn = 0; ptn < orig_nptn; ptn++) {
if (isnan(_pattern_lh[ptn]) || isinf(_pattern_lh[ptn])) {
_pattern_lh[ptn] = LOG_SCALING_THRESHOLD*4; // log(2^(-1024))
}
tree_lh += _pattern_lh[ptn] * ptn_freq[ptn];
}
cout << endl;
if (verbose_mode >= VB_MED) {
printTree(cout);
cout << endl;
}
// cout << "WARNING: Tree log-likelihood is set to " << tree_lh << endl;
}
if (orig_nptn < nptn) {
lh_final = 0.0;
lh_ptn = 0.0;
for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
// double *partial_lh_dad = &dad_branch->partial_lh[ptn*block];
lh_final += lh_ptn;
for (j = 0; j < VCSIZE; j++) {
vc_ptn[j] = 0.0;
double *partial_lh_dad = dad_branch->partial_lh + (ptn+j)*block;
int state_dad = ptn_states_dad[ptn+j];
double *lh_node = &partial_lh_node[state_dad*block];
for (i = 0; i < block; i+=VCSIZE) {
vc_ptn[j] = mul_add(VectorClass().load_a(&lh_node[i]),
VectorClass().load_a(&partial_lh_dad[i]), vc_ptn[j]);
}
}
// bugfix 2016-01-21, prob_const can be rescaled
for (j = 0; j < VCSIZE; j++)
if (dad_branch->scale_num[ptn+j] >= 1)
vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD;
// ptn_invar[ptn] is not aligned
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
}
switch ((nptn-orig_nptn)%VCSIZE) {
case 0: prob_const = horizontal_add(lh_final+lh_ptn); break;
case 1: prob_const = horizontal_add(lh_final)+lh_ptn[0]; break;
case 2: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]; break;
case 3: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2]; break;
default: assert(0); break;
}
}
aligned_free(ptn_states_dad);
aligned_free(partial_lh_node);
// ascertainment bias correction
// if (orig_nptn < nptn) {
// lh_final = 0.0;
// lh_ptn = 0.0;
// for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
// double *partial_lh_dad = &dad_branch->partial_lh[ptn*block];
// lh_final += lh_ptn;
//
// // initialize vc_tip_partial_lh
// for (j = 0; j < VCSIZE; j++) {
// double *lh_dad = lh_states_dad[ptn+j];
// for (i = 0; i < nstates/VCSIZE; i++) {
// vc_tip_partial_lh[j*(nstates/VCSIZE)+i].load(&lh_dad[i*VCSIZE]); // lh_dad is not aligned!
// }
// vc_partial_lh_dad[j].load_a(&partial_lh_dad[j*block]);
// vc_ptn[j] = vc_val[0] * vc_tip_partial_lh[j*(nstates/VCSIZE)] * vc_partial_lh_dad[j];
// }
//
// // compute vc_ptn
// for (i = 1; i < block/VCSIZE; i++)
// for (j = 0; j < VCSIZE; j++) {
// vc_partial_lh_dad[j].load_a(&partial_lh_dad[j*block+i*VCSIZE]);
// vc_ptn[j] = mul_add(vc_val[i] * vc_tip_partial_lh[j*(nstates/VCSIZE)+i%(nstates/VCSIZE)],
// vc_partial_lh_dad[j], vc_ptn[j]);
// }
//
// // bugfix 2016-01-21, prob_const can be rescaled
// for (j = 0; j < VCSIZE; j++)
// if (dad_branch->scale_num[ptn+j] >= 1)
// vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD;
//
// // ptn_invar[ptn] is not aligned
// lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
// }
// switch ((nptn-orig_nptn)%VCSIZE) {
// case 0: prob_const = horizontal_add(lh_final+lh_ptn); break;
// case 1: prob_const = horizontal_add(lh_final)+lh_ptn[0]; break;
// case 2: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]; break;
// case 3: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2]; break;
// default: assert(0); break;
// }
// }
// aligned_free(lh_states_dad);
} else {
// both dad and node are internal nodes
VectorClass vc_partial_lh_node[VCSIZE];
VectorClass vc_partial_lh_dad[VCSIZE], vc_ptn[VCSIZE];
VectorClass lh_final(0.0), vc_freq;
VectorClass lh_ptn;
// copy dummy values because VectorClass will access beyond nptn
for (ptn = nptn; ptn < maxptn; ptn++) {
memcpy(&dad_branch->partial_lh[ptn*block], dad_branch->partial_lh, block*sizeof(double));
memcpy(&node_branch->partial_lh[ptn*block], node_branch->partial_lh, block*sizeof(double));
}
#ifdef _OPENMP
#pragma omp parallel private(ptn, i, j, vc_partial_lh_node, vc_partial_lh_dad, vc_ptn, vc_freq, lh_ptn)
{
VectorClass lh_final_th = 0.0;
#pragma omp for nowait
#endif
for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) {
double *partial_lh_dad = dad_branch->partial_lh + ptn*block;
double *partial_lh_node = node_branch->partial_lh + ptn*block;
for (j = 0; j < VCSIZE; j++)
vc_ptn[j] = 0.0;
for (i = 0; i < block; i+=VCSIZE) {
for (j = 0; j < VCSIZE; j++) {
vc_partial_lh_node[j].load_a(&partial_lh_node[i+j*block]);
vc_partial_lh_dad[j].load_a(&partial_lh_dad[i+j*block]);
vc_ptn[j] = mul_add(vc_val[i/VCSIZE] * vc_partial_lh_node[j], vc_partial_lh_dad[j], vc_ptn[j]);
}
}
vc_freq.load_a(&ptn_freq[ptn]);
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]);
lh_ptn = log(abs(lh_ptn));
lh_ptn.store_a(&_pattern_lh[ptn]);
#ifdef _OPENMP
lh_final_th = mul_add(lh_ptn, vc_freq, lh_final_th);
#else
lh_final = mul_add(lh_ptn, vc_freq, lh_final);
#endif
}
#ifdef _OPENMP
#pragma omp critical
{
lh_final += lh_final_th;
}
}
#endif
tree_lh += horizontal_add(lh_final);
assert(!isnan(tree_lh) && !isinf(tree_lh));
if (orig_nptn < nptn) {
// ascertainment bias correction
lh_final = 0.0;
lh_ptn = 0.0;
double *partial_lh_node = &node_branch->partial_lh[orig_nptn*block];
double *partial_lh_dad = &dad_branch->partial_lh[orig_nptn*block];
for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
lh_final += lh_ptn;
for (j = 0; j < VCSIZE; j++)
vc_ptn[j] = 0.0;
for (i = 0; i < block; i+=VCSIZE) {
for (j = 0; j < VCSIZE; j++) {
vc_partial_lh_node[j].load_a(&partial_lh_node[i+j*block]);
vc_partial_lh_dad[j].load_a(&partial_lh_dad[i+j*block]);
vc_ptn[j] = mul_add(vc_val[i/VCSIZE] * vc_partial_lh_node[j], vc_partial_lh_dad[j], vc_ptn[j]);
}
}
// bugfix 2016-01-21, prob_const can be rescaled
for (j = 0; j < VCSIZE; j++)
if (dad_branch->scale_num[ptn+j] + node_branch->scale_num[ptn+j] >= 1)
vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD;
// ptn_invar[ptn] is not aligned
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
partial_lh_node += block*VCSIZE;
partial_lh_dad += block*VCSIZE;
}
switch ((nptn-orig_nptn)%VCSIZE) {
case 0: prob_const = horizontal_add(lh_final+lh_ptn); break;
case 1: prob_const = horizontal_add(lh_final)+lh_ptn[0]; break;
case 2: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]; break;
case 3: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2]; break;
default: assert(0); break;
}
}
}
if (orig_nptn < nptn) {
// ascertainment bias correction
assert(prob_const < 1.0 && prob_const >= 0.0);
prob_const = log(1.0 - prob_const);
for (ptn = 0; ptn < orig_nptn; ptn++)
_pattern_lh[ptn] -= prob_const;
tree_lh -= aln->getNSite()*prob_const;
}
aligned_free(vc_val);
return tree_lh;
}
template <class VectorClass, const int VCSIZE, const int nstates>
double PhyloTree::computeLikelihoodFromBufferEigenSIMD() {
assert(theta_all && theta_computed);
double tree_lh = current_it->lh_scale_factor + current_it_back->lh_scale_factor;
size_t ncat = site_rate->getNRate();
size_t ncat_mix = (model_factory->fused_mix_rate) ? ncat : ncat*model->getNMixtures();
size_t denom = (model_factory->fused_mix_rate) ? 1 : ncat;
size_t block = ncat_mix * nstates;
size_t ptn; // for big data size > 4GB memory required
size_t c, i, j;
size_t orig_nptn = aln->size();
size_t nptn = aln->size()+model_factory->unobserved_ptns.size();
// size_t maxptn = ((nptn+VCSIZE-1)/VCSIZE)*VCSIZE;
double *eval = model->getEigenvalues();
assert(eval);
VectorClass *vc_val0 = (VectorClass*)aligned_alloc<double>(block);
VectorClass vc_len = current_it->length;
for (c = 0; c < ncat_mix; c++) {
size_t m = c/denom;
double *eval_ptr = eval + (m)*nstates;
size_t mycat = c%ncat;
VectorClass vc_rate = site_rate->getRate(mycat);
VectorClass vc_prop = site_rate->getProp(mycat) * model->getMixtureWeight(m);
for (i = 0; i < nstates/VCSIZE; i++) {
VectorClass cof = VectorClass().load_a(&eval_ptr[i*VCSIZE]) * vc_rate;
VectorClass val = exp(cof*vc_len) * vc_prop;
vc_val0[c*nstates/VCSIZE+i] = val;
}
}
VectorClass vc_ptn[VCSIZE];
VectorClass vc_freq;
VectorClass lh_final = 0.0;
// these stores values of 2 consecutive patterns
VectorClass lh_ptn;
// perform 2 sites at the same time for SSE/AVX efficiency
#ifdef _OPENMP
#pragma omp parallel private (ptn, i, j, vc_freq, vc_ptn, lh_ptn)
{
VectorClass lh_final_th = 0.0;
#pragma omp for nowait
#endif
for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) {
double *theta = theta_all + ptn*block;
// initialization
for (i = 0; i < VCSIZE; i++) {
vc_ptn[i] = vc_val0[0] * VectorClass().load_a(theta+i*block);
}
for (i = 1; i < block/VCSIZE; i++) {
for (j = 0; j < VCSIZE; j++) {
vc_ptn[j] = mul_add(VectorClass().load_a(&theta[i*VCSIZE+j*block]), vc_val0[i], vc_ptn[j]);
}
}
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]);
lh_ptn = log(abs(lh_ptn));
lh_ptn.store_a(&_pattern_lh[ptn]);
vc_freq.load_a(&ptn_freq[ptn]);
#ifdef _OPENMP
lh_final_th = mul_add(lh_ptn, vc_freq, lh_final_th);
#else
lh_final = mul_add(lh_ptn, vc_freq, lh_final);
#endif
}
#ifdef _OPENMP
#pragma omp critical
{
lh_final += lh_final_th;
}
}
#endif
tree_lh += horizontal_add(lh_final);
if (isnan(tree_lh) || isinf(tree_lh)) {
cout << "WARNING: Numerical underflow caused by alignment sites here";
i = aln->getNSite();
for (j = 0, c = 0; j < i; j++) {
ptn = aln->getPatternID(j);
if (isnan(_pattern_lh[ptn]) || isinf(_pattern_lh[ptn])) {
cout << " " << j+1;
c++;
if (c >= 10) {
cout << " ...";
break;
}
}
}
cout << endl;
tree_lh = current_it->lh_scale_factor + current_it_back->lh_scale_factor;
for (ptn = 0; ptn < orig_nptn; ptn++) {
if (isnan(_pattern_lh[ptn]) || isinf(_pattern_lh[ptn])) {
_pattern_lh[ptn] = LOG_SCALING_THRESHOLD*4; // log(2^(-1024))
}
tree_lh += _pattern_lh[ptn] * ptn_freq[ptn];
}
}
if (orig_nptn < nptn) {
// ascertaiment bias correction
lh_final = 0.0;
lh_ptn = 0.0;
double prob_const;// df_const, ddf_const;
double *theta = &theta_all[orig_nptn*block];
UBYTE sum_scale_num[nstates+VCSIZE];
memset(sum_scale_num, 0, sizeof(UBYTE)*(nstates+VCSIZE));
if (current_it->node->isLeaf())
memcpy(sum_scale_num, current_it_back->scale_num+orig_nptn, sizeof(UBYTE)*(nptn-orig_nptn));
else if (current_it_back->node->isLeaf())
memcpy(sum_scale_num, current_it->scale_num+orig_nptn, sizeof(UBYTE)*(nptn-orig_nptn));
else {
for (ptn = orig_nptn; ptn < nptn; ptn++)
sum_scale_num[ptn-orig_nptn] = current_it->scale_num[ptn] + current_it_back->scale_num[ptn];
}
for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
lh_final += lh_ptn;
// initialization
for (i = 0; i < VCSIZE; i++) {
vc_ptn[i] = vc_val0[0] * VectorClass().load_a(theta+i*block);
}
for (i = 1; i < block/VCSIZE; i++) {
for (j = 0; j < VCSIZE; j++) {
vc_ptn[j] = mul_add(VectorClass().load_a(&theta[i*VCSIZE+j*block]), vc_val0[i], vc_ptn[j]);
}
}
theta += block*VCSIZE;
// bugfix 2016-01-21, prob_const can be rescaled
for (j = 0; j < VCSIZE; j++)
if (sum_scale_num[ptn+j-orig_nptn] >= 1)
vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD;
// ptn_invar[ptn] is not aligned
lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
}
switch ((nptn-orig_nptn) % VCSIZE) {
case 0:
prob_const = horizontal_add(lh_final+lh_ptn);
break;
case 1:
prob_const = horizontal_add(lh_final)+lh_ptn[0];
break;
case 2:
prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1];
break;
case 3:
prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2];
break;
default:
assert(0);
break;
}
prob_const = log(1.0 - prob_const);
tree_lh -= aln->getNSite() * prob_const;
for (ptn = 0; ptn < orig_nptn; ptn++)
_pattern_lh[ptn] -= prob_const;
}
aligned_free(vc_val0);
return tree_lh;
}
*/
/****************************************************************************
        Highly optimized parsimony functions
 ****************************************************************************/
#ifdef _MSC_VER
#define MEM_ALIGN_BEGIN __declspec(align(32))
#define MEM_ALIGN_END
#else
#define MEM_ALIGN_BEGIN
#define MEM_ALIGN_END __attribute__((aligned(32)))
#endif
// Total number of set bits in a 128-bit vector (4 x 32-bit lanes):
// spills the lanes to an aligned scratch buffer and counts them with the
// branch-free Lauradoux bit-slicing popcount over 4 words.
inline UINT fast_popcount(Vec4ui &x) {
    MEM_ALIGN_BEGIN UINT vec[4] MEM_ALIGN_END;
    x.store_a(vec);
    return popcount_lauradoux(vec, 4);
}
// Total number of set bits in a 256-bit vector (8 x 32-bit lanes).
inline UINT fast_popcount(Vec8ui &x) {
#if (defined (__GNUC__) || defined(__clang__)) && !defined(__ARM_NEON)
    // gcc/clang on x86: spill to four 64-bit words and count each with the
    // POPCNT instruction via inline asm.
    // NOTE(review): assumes the CPU supports POPCNT — no runtime feature
    // check here; confirm the build only targets such CPUs.
    MEM_ALIGN_BEGIN uint64_t vec[4] MEM_ALIGN_END;
    MEM_ALIGN_BEGIN uint64_t res[4] MEM_ALIGN_END;
    Vec8ui y;
    x.store_a(vec);
    __asm("popcntq %1, %0" : "=r"(res[0]) : "r"(vec[0]) : );
    __asm("popcntq %1, %0" : "=r"(res[1]) : "r"(vec[1]) : );
    __asm("popcntq %1, %0" : "=r"(res[2]) : "r"(vec[2]) : );
    __asm("popcntq %1, %0" : "=r"(res[3]) : "r"(vec[3]) : );
    // Reinterpret the four 64-bit counts as eight 32-bit lanes; each count is
    // <= 64, so the upper half of every 64-bit word is zero and the 32-bit
    // horizontal sum equals the sum of the four counts.
    y.load_a(res);
    return horizontal_add(y);
#else
    // Fallback path (MSVC / others): use the _mm_popcnt_u64 intrinsic per
    // 64-bit word, then sum the four 32-bit results.
    MEM_ALIGN_BEGIN uint64_t vec[4] MEM_ALIGN_END;
    MEM_ALIGN_BEGIN int res[4] MEM_ALIGN_END;
    Vec4ui y;
    x.store_a(vec);
    res[0] = _mm_popcnt_u64(vec[0]);
    res[1] = _mm_popcnt_u64(vec[1]);
    res[2] = _mm_popcnt_u64(vec[2]);
    res[3] = _mm_popcnt_u64(vec[3]);
    y.load_a(res);
    return horizontal_add(y);
#endif
}
// Replace every 32-bit lane of x with the popcount of that lane (in place).
inline void horizontal_popcount(Vec4ui &x) {
    MEM_ALIGN_BEGIN UINT vec[4] MEM_ALIGN_END;
    x.store_a(vec);
    for (int lane = 0; lane < 4; lane++)
        vec[lane] = vml_popcnt(vec[lane]);
    x.load_a(vec);
}
// Replace every 32-bit lane of x with the popcount of that lane (in place).
inline void horizontal_popcount(Vec8ui &x) {
    MEM_ALIGN_BEGIN UINT vec[8] MEM_ALIGN_END;
    x.store_a(vec);
    for (int lane = 0; lane < 8; lane++)
        vec[lane] = vml_popcnt(vec[lane]);
    x.load_a(vec);
}
template<class VectorClass>
void PhyloTree::computePartialParsimonyFastSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad) {
    // Compute the directed Fitch-parsimony state-set bit-vectors for the
    // subtree behind dad_branch (viewed from dad), vectorized with VectorClass.
    // partial_pars layout: for every group of NUM_BITS parsimony sites there is
    // one entry of nstates*VCSIZE UINTs (one VectorClass word per state); bit k
    // of a state's word means "state possible at site k of this group". The
    // extra slot at index nstates*VCSIZE*nsites holds the subtree's score.
    if (dad_branch->partial_lh_computed & 2)
        return;                         // already computed for this direction
    Node *node = dad_branch->node;
    int nstates = aln->getMaxNumStates();
    int site = 0;                       // site index within the current NUM_BITS group
    const int VCSIZE = VectorClass::size();
    const int NUM_BITS = VectorClass::size() * UINT_BITS;   // sites packed per vector entry
    dad_branch->partial_lh_computed |= 2;
    if (node->name == ROOT_NAME) {
        ASSERT(dad);
        // special treatment for root node: every state possible at every site
        // (all bits set), subtree score 0
        // if (aln->ordered_pattern.empty())
        //     aln->orderPatternByNumChars();
        // ASSERT(!aln->ordered_pattern.empty());
        int pars_size = getBitsBlockSize();
        memset(dad_branch->partial_pars, 255, pars_size*sizeof(UINT));
        size_t nsites = (aln->num_parsimony_sites+NUM_BITS-1)/NUM_BITS;
        dad_branch->partial_pars[nstates*VCSIZE*nsites] = 0;
    } else if (node->isLeaf() && dad) {
        // external node: expand the leaf's characters (over all partitions,
        // pattern by pattern, repeated by pattern frequency) into per-state
        // bit masks
        vector<Alignment*> *partitions = NULL;
        if (aln->isSuperAlignment())
            partitions = &((SuperAlignment*)aln)->partitions;
        else {
            // wrap the single alignment so both cases iterate uniformly
            partitions = new vector<Alignment*>;
            partitions->push_back(aln);
        }
        // if (aln->ordered_pattern.empty())
        //     aln->orderPatternByNumChars();
        // ASSERT(!aln->ordered_pattern.empty());
        int leafid = node->id;
        int pars_size = getBitsBlockSize();
        memset(dad_branch->partial_pars, 0, pars_size*sizeof(UINT));
        // maps AA ambiguity codes B/Z/J (states 20..22) to their two
        // constituent amino-acid states, consumed pairwise below
        int ambi_aa[] = {2, 3, 5, 6, 9, 10}; // {4+8, 32+64, 512+1024};
        UINT *x = dad_branch->partial_pars;  // write cursor: base of current vector entry
        int start_pos = 0;
        for (vector<Alignment*>::iterator alnit = partitions->begin(); alnit != partitions->end(); alnit++) {
            int end_pos = start_pos + (*alnit)->ordered_pattern.size();
            switch ((*alnit)->seq_type) {
            case SEQ_DNA:
                for (int patid = start_pos; patid != end_pos; patid++) {
                    Alignment::iterator pat = aln->ordered_pattern.begin()+ patid;
                    int state = pat->at(leafid);
                    int freq = pat->frequency;
                    if (state < 4) {
                        // unambiguous nucleotide: set one bit in that state's word
                        for (int j = 0; j < freq; j++, site++) {
                            if (site == NUM_BITS) {
                                // current vector entry full: advance to the next
                                x += nstates*VCSIZE;
                                site = 0;
                            }
                            x[state*VCSIZE + site/UINT_BITS] |= (1 << (site % UINT_BITS));
                        }
                    } else if (state == (*alnit)->STATE_UNKNOWN) {
                        // gap/unknown: all four nucleotides possible
                        for (int j = 0; j < freq; j++, site++) {
                            if (site == NUM_BITS) {
                                x += nstates*VCSIZE;
                                site = 0;
                            }
                            UINT bit1 = (1 << (site%UINT_BITS));
                            UINT *p = x+(site/UINT_BITS);
                            p[0] |= bit1;
                            p[VCSIZE] |= bit1;
                            p[2*VCSIZE] |= bit1;
                            p[3*VCSIZE] |= bit1;
                        }
                    } else {
                        // IUPAC ambiguity: states >= 4 encode nucleotide
                        // bit-combinations offset by 3, so state-3 is the mask
                        state -= 3;
                        for (int j = 0; j < freq; j++, site++) {
                            if (site == NUM_BITS) {
                                x += nstates*VCSIZE;
                                site = 0;
                            }
                            UINT *p = x + ((site/UINT_BITS));
                            UINT bit1 = (1 << (site%UINT_BITS));
                            for (int i = 0; i < 4; i++)
                                if (state & (1<<i))
                                    p[i*VCSIZE] |= bit1;
                        }
                    }
                }
                break;
            case SEQ_PROTEIN:
                for (int patid = start_pos; patid != end_pos; patid++) {
                    Alignment::iterator pat = aln->ordered_pattern.begin()+ patid;
                    int state = pat->at(leafid);
                    int freq = pat->frequency;
                    if (state < 20) {
                        // unambiguous amino acid
                        for (int j = 0; j < freq; j++, site++) {
                            if (site == NUM_BITS) {
                                x += nstates*VCSIZE;
                                site = 0;
                            }
                            x[state*VCSIZE + site/UINT_BITS] |= (1 << (site % UINT_BITS));
                        }
                    } else if (state == (*alnit)->STATE_UNKNOWN) {
                        // gap/unknown: all 20 amino acids possible
                        for (int j = 0; j < freq; j++, site++) {
                            if (site == NUM_BITS) {
                                x += nstates*VCSIZE;
                                site = 0;
                            }
                            UINT bit1 = (1 << (site%UINT_BITS));
                            UINT *p = x+(site/UINT_BITS);
                            for (int i = 0; i < 20; i++)
                                p[i*VCSIZE] |= bit1;
                        }
                    } else {
                        // ambiguity code (states 20..22): set the two
                        // constituent states from ambi_aa
                        ASSERT(state < 23);
                        state = (state-20)*2;
                        for (int j = 0; j < freq; j++, site++) {
                            if (site == NUM_BITS) {
                                x += nstates*VCSIZE;
                                site = 0;
                            }
                            UINT *p = x + ((site/UINT_BITS));
                            UINT bit1 = (1 << (site%UINT_BITS));
                            p[ambi_aa[state]*VCSIZE] |= bit1;
                            p[ambi_aa[state+1]*VCSIZE] |= bit1;
                        }
                    }
                }
                break;
            default:
                // generic alphabets; PoMo sampled states are first resolved to
                // a (near-)fixed nucleotide or to unknown
                for (int patid = start_pos; patid != end_pos; patid++) {
                    Alignment::iterator pat = aln->ordered_pattern.begin()+ patid;
                    int state = pat->at(leafid);
                    int freq = pat->frequency;
                    if (aln->seq_type == SEQ_POMO && state >= nstates && state < aln->STATE_UNKNOWN) {
                        // decode the packed PoMo sampled state: two nucleotide
                        // ids (2 bits each) and their observed counts
                        state -= nstates;
                        ASSERT(state < aln->pomo_sampled_states.size());
                        int id1 = aln->pomo_sampled_states[state] & 3;
                        int id2 = (aln->pomo_sampled_states[state] >> 16) & 3;
                        int value1 = (aln->pomo_sampled_states[state] >> 2) & 16383;
                        int value2 = aln->pomo_sampled_states[state] >> 18;
                        double weight1 = ((double)value1)/(value1+value2);
                        // int N = aln->virtual_pop_size;
                        // int M = value1 + value2;
                        // 2016-09-30: resolving polymorphic states to fixed states
                        // value1 = value1*N/(value1+value2);
                        int real_state;
                        if (weight1 < 1.0/4)
                            real_state = id2;    // dominated by second allele
                        else if (weight1 > 3.0/4)
                            real_state = id1;    // dominated by first allele
                        else
                            real_state = (*alnit)->STATE_UNKNOWN;  // too balanced: treat as unknown
                        /*
                        if (value1 == 0)
                            real_state = id2;
                        else if (value1 >= N)
                            real_state = id1;
                        else {
                            int j;
                            if (id1 == 0) j = id2 - 1;
                            else j = id1 + id2;
                            real_state = 4 + j*(N-2) + j + value1 - 1;
                        }
                        */
                        state = real_state;
                        ASSERT(state < 4 || state == (*alnit)->STATE_UNKNOWN);
                        // assert(state < nstates);
                    }
                    if (state < (*alnit)->num_states) {
                        // unambiguous state
                        for (int j = 0; j < freq; j++, site++) {
                            if (site == NUM_BITS) {
                                x += nstates*VCSIZE;
                                site = 0;
                            }
                            x[state*VCSIZE + site/UINT_BITS] |= (1 << (site % UINT_BITS));
                        }
                    } else if (state == (*alnit)->STATE_UNKNOWN) {
                        // unknown: every state possible
                        for (int j = 0; j < freq; j++, site++) {
                            if (site == NUM_BITS) {
                                x += nstates*VCSIZE;
                                site = 0;
                            }
                            UINT bit1 = (1 << (site%UINT_BITS));
                            UINT *p = x+(site/UINT_BITS);
                            for (int i = 0; i < (*alnit)->num_states; i++)
                                p[i*VCSIZE] |= bit1;
                        }
                    } else {
                        ASSERT(0);   // unhandled state code
                    }
                } // FOR loop
                break; // of switch
            } // end of switch
            start_pos = end_pos;
        } // of end FOR LOOP
        ASSERT(start_pos == aln->ordered_pattern.size());
        // assert(site == aln->num_parsimony_sites % NUM_BITS);
        // add dummy states: fill the unused tail of the last entry with ones in
        // state 0's words so padded sites share a common state and never add
        // parsimony changes
        if (site > 0 && site < NUM_BITS) {
            x += site/UINT_BITS;
            if (site % UINT_BITS != 0) {
                // complete the partially-filled word
                *x |= ~((1<<(site%UINT_BITS)) - 1);
                x++;
            }
            int max_sites = ((site+UINT_BITS-1)/UINT_BITS);
            memset(x, 255, (VCSIZE - max_sites)*sizeof(UINT));
        }
        if (!aln->isSuperAlignment())
            delete partitions;  // only the locally-created wrapper vector
    } else {
        // internal node: Fitch-combine the state sets of the two child subtrees
        ASSERT(node->degree() == 3); // it works only for strictly bifurcating tree
        PhyloNeighbor *left = NULL, *right = NULL; // left & right are two neighbors leading to 2 subtrees
        FOR_NEIGHBOR_IT(node, dad, it) {
            PhyloNeighbor* pit = (PhyloNeighbor*) (*it);
            if ((pit->partial_lh_computed & 2) == 0) {
                // recurse into not-yet-computed subtrees first
                computePartialParsimonyFastSIMD<VectorClass>(pit, (PhyloNode*) node);
            }
            if (!left) left = pit; else right = pit;
        }
        // VectorClass score = 0;
        UINT score = 0;             // number of state changes added at this node
        size_t nsites = (aln->num_parsimony_sites+NUM_BITS-1)/NUM_BITS;
        int entry_size = nstates * VCSIZE;
        switch (nstates) {
        case 4:
            // unrolled DNA kernel
#ifdef _OPENMP
#pragma omp parallel for private (site) reduction(+: score) if(nsites>num_threads*10)
#endif
            for (site = 0; site<nsites; site++) {
                size_t offset = entry_size*site;
                VectorClass *x = (VectorClass*)(left->partial_pars + offset);
                VectorClass *y = (VectorClass*)(right->partial_pars + offset);
                VectorClass *z = (VectorClass*)(dad_branch->partial_pars + offset);
                // Fitch step: per-state intersection of the children...
                z[0] = x[0] & y[0];
                z[1] = x[1] & y[1];
                z[2] = x[2] & y[2];
                z[3] = x[3] & y[3];
                VectorClass w = z[0] | z[1] | z[2] | z[3];
                w = ~w;     // sites whose intersection is empty
                // ...take the union at those sites, one change each
                z[0] |= w & (x[0] | y[0]);
                z[1] |= w & (x[1] | y[1]);
                z[2] |= w & (x[2] | y[2]);
                z[3] |= w & (x[3] | y[3]);
                // horizontal_popcount(w);
                // score += w;
                score += fast_popcount(w);
                // x += 4;
                // y += 4;
                // z += 4;
            }
            break;
        default:
            // generic kernel for any number of states
#ifdef _OPENMP
#pragma omp parallel for private (site) reduction(+: score) if(nsites>num_threads*10)
#endif
            for (site = 0; site<nsites; site++) {
                size_t offset = entry_size*site;
                VectorClass *x = (VectorClass*)(left->partial_pars + offset);
                VectorClass *y = (VectorClass*)(right->partial_pars + offset);
                VectorClass *z = (VectorClass*)(dad_branch->partial_pars + offset);
                int i;
                VectorClass w = 0;
                for (i = 0; i < nstates; i++) {
                    z[i] = x[i] & y[i];
                    w |= z[i];
                }
                w = ~w;     // sites whose intersection is empty
                for (i = 0; i < nstates; i++) {
                    z[i] |= w & (x[i] | y[i]);
                }
                // horizontal_popcount(w);
                // score += w;
                score += fast_popcount(w);
                // NOTE(review): the three increments below are dead stores —
                // x, y, z are re-derived from offset every iteration
                x += nstates;
                y += nstates;
                z += nstates;
            }
            break;
        }
        // UINT sum_score = horizontal_add(score);
        // UINT *zscore = (UINT*)z;
        // UINT *xscore = (UINT*)x;
        // UINT *yscore = (UINT*)y;
        // subtree score = changes introduced here + both children's scores
        dad_branch->partial_pars[nstates*VCSIZE*nsites] = score + left->partial_pars[nstates*VCSIZE*nsites] + right->partial_pars[nstates*VCSIZE*nsites];
    }
}
template<class VectorClass>
int PhyloTree::computeParsimonyBranchFastSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad, int *branch_subst) {
    // Fitch parsimony score of the whole tree evaluated across the branch
    // (dad, node): intersect the two directed partial state-set vectors and
    // count sites with disjoint sets (one change each), then add both subtree
    // scores. If branch_subst != NULL it receives the number of changes on
    // this branch alone. Returns the total tree parsimony score.
    PhyloNode *node = (PhyloNode*) dad_branch->node;
    PhyloNeighbor *node_branch = (PhyloNeighbor*) node->findNeighbor(dad);
    ASSERT(node_branch);
    if (!central_partial_pars)
        initializeAllPartialPars();
    // make sure both directed partial parsimony vectors are up to date
    if ((dad_branch->partial_lh_computed & 2) == 0)
        computePartialParsimonyFastSIMD<VectorClass>(dad_branch, dad);
    if ((node_branch->partial_lh_computed & 2) == 0)
        computePartialParsimonyFastSIMD<VectorClass>(node_branch, node);
    int site;
    int nstates = aln->getMaxNumStates();
    // VectorClass score = 0;
    // VectorClass w;
    const int NUM_BITS = VectorClass::size() * UINT_BITS;   // sites per vector entry
    int nsites = (aln->num_parsimony_sites + NUM_BITS - 1)/NUM_BITS;
    int entry_size = nstates * VectorClass::size();
    int scoreid = nsites*entry_size;        // index of the per-subtree score slot
    UINT sum_end_node = (dad_branch->partial_pars[scoreid] + node_branch->partial_pars[scoreid]);
    UINT score = sum_end_node;
    // early-termination bound (serial path only): stop once the score cannot
    // beat the best known one; disabled when the caller wants branch_subst
    UINT lower_bound = best_pars_score;
    if (branch_subst) lower_bound = INT_MAX;
    switch (nstates) {
    case 4:
        // unrolled DNA kernel
#ifdef _OPENMP
#pragma omp parallel for private (site) reduction(+: score) if(nsites>num_threads*10)
#endif
        for (site = 0; site < nsites; site++) {
            size_t offset = entry_size*site;
            VectorClass *x = (VectorClass*)(dad_branch->partial_pars + offset);
            VectorClass *y = (VectorClass*)(node_branch->partial_pars + offset);
            VectorClass w = (x[0] & y[0]) | (x[1] & y[1]) | (x[2] & y[2]) | (x[3] & y[3]);
            w = ~w;     // sites whose state sets are disjoint: one change each
            // horizontal_popcount(w);
            // score += w;
            score += fast_popcount(w);
#ifndef _OPENMP
            if (score >= lower_bound)
                break;
#endif
        }
        break;
    default:
        // generic kernel for any number of states
#ifdef _OPENMP
#pragma omp parallel for private (site) reduction(+: score) if(nsites>num_threads*10)
#endif
        for (site = 0; site < nsites; site++) {
            size_t offset = entry_size*site;
            VectorClass *x = (VectorClass*)(dad_branch->partial_pars + offset);
            VectorClass *y = (VectorClass*)(node_branch->partial_pars + offset);
            VectorClass w = x[0] & y[0];
            for (int i = 1; i < nstates; i++) {
                w |= x[i] & y[i];
            }
            w = ~w;     // sites whose state sets are disjoint
            // horizontal_popcount(w);
            // score += w;
            score += fast_popcount(w);
#ifndef _OPENMP
            if (score >= lower_bound)
                break;
#endif
        }
        break;
    }
    // UINT sum_score = horizontal_add(score);
    // if (branch_subst)
    //     *branch_subst = sum_score;
    if (branch_subst)
        *branch_subst = score - sum_end_node;   // changes on this branch only
    // UINT *xscore = (UINT*)x;
    // UINT *yscore = (UINT*)y;
    // sum_score += *xscore + *yscore;
    // score += *xscore + *yscore;
    // return sum_score;
    return score;
}
#endif /* PHYLOKERNEL_H_ */
|
lu2.c | // lu2.c
//
// test program for blocked LU decomposition
//
// Time-stamp: <2019-05-06 14:43:09 makino>
//#define NOBLAS
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <getopt.h>
#include <emmintrin.h>
typedef double v2df __attribute__((vector_size(16)));
typedef union {v2df v; double s[2];}v2u;
#include <lu2tlib.h>
#include <lu2lib.h>
#define LIBTESTGEMMMAIN
#include "libtestgemm.h"
void timer_init();
double cpusec();
double wsec();
#define RDIM (n+16)
/* Copy the first n+2 columns of every one of the n rows of a into a2
 * (the matrix proper, the RHS column, and one padding column). */
void copymats( int n, double a[n][RDIM], double a2[n][RDIM])
{
    int row, col;
    const int ncols = n + 2;
    for (row = 0; row < n; row++) {
	for (col = 0; col < ncols; col++) {
	    a2[row][col] = a[row][col];
	}
    }
}
/* Extract the RHS column (column index n) of the augmented matrix a
 * into the plain vector b. */
void copybvect( int n, double a[][RDIM], double b[])
{
    int row;
    for (row = 0; row < n; row++) {
	b[row] = a[row][n];
    }
}
/*
 * Check the solution x against the (freshly re-generated) augmented
 * matrix a: recompute b2 = sum_j a[i][j]*x[j] for every row, compare it
 * with the stored RHS a[i][n], and print the maximum absolute residual.
 * With verbose != 0 each row's x, RHS, recomputed RHS and error are
 * printed as well.
 */
void showresult(int n, double a[n][RDIM], double x[], int verbose)
{
    int i, j;
    double emax = 0;
    for(i=0;i<n;i++){
	int k;
	double b2=0;
	//	printf("%3d: ", i);
	//	for(j=0;j<n;j++) printf(" %10.3e", a[i][j]);
	for(j=0;j<n;j++) b2 += a[i][j] * x[j];
	// residual of this row against the stored right-hand side
	double err = b2-a[i][n];
	emax = (fabs(err) > emax) ? fabs(err):emax;
	if (verbose) printf("%5d %10.3e %10.3e %10.3e %10.3e \n", i,x[i], a[i][n], b2, err);
    }
    printf("Emax= %10.3e\n", emax);
}
/* Read an n x (n+1) augmented matrix (matrix plus RHS column) from
 * stdin, row by row, in %le format. */
void readmat( int n, double a[n][RDIM])
{
    int row, col;
    for (row = 0; row < n; row++) {
	for (col = 0; col <= n; col++) {
	    scanf("%le", &(a[row][col]));
	}
    }
}
/*
 * Fill the n x n matrix and the RHS column (index n) with uniform
 * pseudo-random values in [-0.5, 0.5).  Seeding with a fixed value
 * makes the matrix reproducible, which main() exploits to re-generate
 * it after the in-place factorization and check the residual.
 */
void randomsetmat( int n, int seed, double a[n][RDIM])
{
    long int i, j;
    srand48((long) seed);
    for(i=0;i<n;i++){
	//	printf("i=%d\n", i);
	double * ap = a[i];
	for(j=0;j<n;j++) {
	    //	    ap[j]=drand48();
	    ap[j]=drand48()-0.5;
	}
	//	printf("n, i=%d\n", i);
	//	a[i][n]=1;
	a[i][n]=drand48()-0.5;
    }
}
/* Print the n x (n+1) augmented matrix, one numbered row per line. */
void printmat( int n, double a[n][RDIM])
{
    int row, col;
    for (row = 0; row < n; row++) {
	printf("%3d: ", row);
	for (col = 0; col < n + 1; col++) {
	    printf(" %10.3e", a[row][col]);
	}
	printf("\n");
    }
    printf("\n");
}
/* Print a square n x n matrix (no RHS column), one numbered row per line. */
void printsqmat( int n, double a[n][n])
{
    int row, col;
    for (row = 0; row < n; row++) {
	printf("%3d: ", row);
	for (col = 0; col < n; col++) {
	    printf(" %10.3e", a[row][col]);
	}
	printf("\n");
    }
    printf("\n");
}
/*
 * Back substitution for the factored system: after elimination each row
 * of a holds a unit-diagonal upper-triangular row (the diagonal is
 * implicitly 1 because every pivot row was normalized) with the
 * transformed RHS in column n.  The solution is left in b.
 */
void backward_sub(int n,double a[n][RDIM], double b[])
{
    int i,j,k;
    // start from the transformed right-hand side stored in column n
    for (i=0;i<n;i++)b[i] = a[i][n];
    // eliminate the already-solved trailing unknowns from each row
    for(j=n-2;j>=0;j--)
	for(k=j+1;k<n;k++) b[j] -= b[k]*a[j][k];
}
/*
 * Reference (unblocked) LU solver with partial pivoting, operating on
 * the augmented matrix in place.  Each pivot row is scaled by 1/pivot so
 * the factorization leaves a unit diagonal, then backward_sub() recovers
 * the solution into b.  Used only for small-scale validation.
 * NOTE(review): the printmat() call near the end dumps the entire
 * factored matrix on every invocation -- this looks like leftover debug
 * output; confirm before removing.
 */
void lu( int n, double a[n][RDIM], double b[])
{
    int i, j, k;
    for(i=0;i<n-1;i++){
	// select pivot
	double amax = fabs(a[i][i]);
	int p=i;
	for(j=i+1;j<n;j++){
	    if (fabs(a[j][i]) > amax){
		amax = fabs(a[j][i]);
		p = j;
	    }
	}
	// exchange rows
	if (p != i){
	    for(j=i;j<n+1;j++){
		double tmp = a[p][j];
		a[p][j] = a[i][j];
		a[i][j]=tmp;
	    }
	}
	// normalize row i
	double ainv = 1.0/a[i][i];
	//	fprintf(stderr,"%d %e\n", i, ainv);
	for(k=i+1;k<n+1;k++) a[i][k]*= ainv;
	// subtract row i from all lower rows
	for(j=i+1;j<n;j++){
	    //	    fprintf(stderr,"j=%d \n",j);
	    for(k=i+1;k<n+1;k++) a[j][k] -= a[j][i] * a[i][k];
	}
    }
    printmat(n,a);
    // the last row was never reached by the outer loop: finish its RHS
    a[n-1][n] /= a[n-1][n-1];
    backward_sub(n,a,b);
}
/* Return the index (>= current) of the row whose entry in column
 * `current` has the largest absolute value -- the partial-pivot row. */
int findpivot(int n, double a[n][RDIM], int current)
{
    int row;
    int best = current;
    double best_abs = fabs(a[current][current]);
    for (row = current + 1; row < n; row++) {
	double cand = fabs(a[row][current]);
	if (cand > best_abs) {
	    best_abs = cand;
	    best = row;
	}
    }
    return best;
}
/*
 * Multiply the columns of row `row` starting at cstart by `scale`,
 * two v2df vectors (four doubles) per iteration, prefetching the write
 * stream.  The vector count is rounded up from (cend+1-cstart)/2, so a
 * few elements past cend may also be scaled -- presumably harmless
 * because of the RDIM = n+16 row padding; confirm against the callers'
 * alignment WARNINGs.  Cycle counts go to timer slot 1.
 */
void scalerow( int n, double a[n][RDIM], double scale,
	       int row, int cstart, int cend)
{
    int j;
    BEGIN_TSC;
    int jmax = (cend+1-cstart)/2;
    // assumes a[row]+cstart is 16-byte aligned -- TODO confirm
    v2df *a1 = (v2df*)(a[row]+cstart);
    v2df ss = (v2df){scale,scale};
    for(j=0;j<jmax;j+=2){
	__builtin_prefetch(a1+j+16,1,0);
	a1[j] *= ss;
	a1[j+1]*= ss;
    }
    END_TSC(t,1);
}
/*
 * Exchange columns [cstart .. cend) of rows row1 and row2 using 16-byte
 * vector moves, two v2df per iteration, prefetching both write streams.
 * No-op when row1 == row2.  As the WARNING says, correctness relies on
 * alignment and on the RDIM padding absorbing the rounded-up vector
 * count.  Timed into slot 0 when TIMETEST is enabled.
 */
void swaprows(int n, double a[n][RDIM], int row1, int row2,
	      int cstart, int cend)
{
    /* WARNING: works only for row1 % 4 = 0 and  RDIM >= n+4*/
    int j;
    if (row1 != row2){
	int jmax = (cend+1-cstart)/2;
#ifdef TIMETEST
	BEGIN_TSC;
#endif
	v2df *a1, *a2, tmp, tmp1;
	a1 = (v2df*)(a[row1]+cstart);
	a2 = (v2df*)(a[row2]+cstart);
	for(j=0;j<jmax;j+=2){
	    tmp = a1[j];
	    tmp1 = a1[j+1];
	    a1[j]=a2[j];
	    a1[j+1]=a2[j+1];
	    a2[j]=tmp;
	    a2[j+1]=tmp1;
	    __builtin_prefetch(a1+j+16,1,0);
	    __builtin_prefetch(a2+j+16,1,0);
	    // prefetch options: 1: for write, 0: read only
	    //                   0: need not be kept in cache
	    //                   3: should be there for as long as  possible
	}
#ifdef TIMETEST
	END_TSC(t,0);
#endif
    }
}
/*
 * Plain (non-prefetching) vectorized row exchange of columns
 * [cstart .. cend) between row1 and row2; no-op when the rows coincide.
 * The #if 0 branch preserves the original scalar version for reference.
 * Same alignment/padding caveats as swaprows().
 */
void swaprows_simple(int n, double a[n][RDIM], int row1, int row2,
	      int cstart, int cend)
{
    /* WARNING: works only for row1 % 4 = 0 and  RDIM >= n+4*/
    int j;
    if (row1 != row2){
	int jmax = (cend+1-cstart)/2;
#if 1
	v2df *a1, *a2, tmp, tmp1;
	a1 = (v2df*)(a[row1]+cstart);
	a2 = (v2df*)(a[row2]+cstart);
	for(j=0;j<jmax;j++){
	    tmp = a1[j];
	    a1[j]=a2[j];
	    a2[j]=tmp;
	}
#endif
#if 0
	for(j=cstart;j<cend;j++){
	    double tmp = a[row1][j];
	    a[row1][j]=a[row2][j];
	    a[row2][j]=tmp;
	}
#endif
    }
}
/*
 * Fused pivot-swap + row scaling: exchanges columns [cstart .. cend) of
 * rows row1 and row2, multiplying the data that moves INTO row2 (the old
 * row1 contents) by `scale`.  The callers pass row1 = pivot row and
 * row2 = diagonal row with scale = 1/pivot, so this performs the swap
 * and the normalization in one pass over memory.  When row1 == row2 it
 * degenerates to a plain scalerow().  Main loop handles v2df pairs; the
 * (jmax & 1) tail covers the odd remaining vector.
 */
void swaprows_simple_with_scale(int n, double a[n][RDIM], int row1, int row2,
	      int cstart, int cend, double scale)
{
    /* WARNING: works only for row1 % 4 = 0 and  RDIM >= n+4*/
    int j;
    if (row1 != row2){
	int jmax = (cend+1-cstart)/2;
#if 1
	v2df *a1, *a2, tmp, tmp1;
	v2df ss = (v2df){scale,scale};
	a1 = (v2df*)(a[row1]+cstart);
	a2 = (v2df*)(a[row2]+cstart);
	for(j=0;j<(jmax & (0xfffffffe));j+=2){
	    __builtin_prefetch(a1+j+32,1,0);
	    __builtin_prefetch(a2+j+32,1,0);
	    tmp = a1[j];
	    a1[j]=a2[j];
	    a2[j]=tmp*ss;
	    tmp1 = a1[j+1];
	    a1[j+1]=a2[j+1];
	    a2[j+1]=tmp1*ss;
	}
	if (jmax &1){
	    tmp = a1[jmax-1];
	    a1[jmax-1]=a2[jmax-1];
	    a2[jmax-1]=tmp*ss;
	}
#endif
#if 0
	// disabled scalar reference version (note: it omits the scaling)
	for(j=cstart;j<cend;j++){
	    double tmp = a[row1][j];
	    a[row1][j]=a[row2][j];
	    a[row2][j]=tmp;
	}
#endif
    }else{
	scalerow(n,a,scale ,row2,cstart,cend);
    }
}
/* Exchange elements i1 and i2 of the vector b. */
void swapelements(double b[], int i1, int i2)
{
    const double held = b[i1];
    b[i1] = b[i2];
    b[i2] = held;
}
/* Rank-1 update of the sub-block a[r0..r1-1][c0..c1-1]: subtract the
 * outer product of column `current` and row `current` of a.  This is
 * the elimination step of one pivot applied to a rectangular region. */
void vvmulandsub(int n, double a[n][RDIM], int current,
		 int c0, int c1, int r0,int r1)
{
    int row, col;
    for (row = r0; row < r1; row++) {
	for (col = c0; col < c1; col++) {
	    a[row][col] -= a[row][current] * a[current][col];
	}
    }
}
/*
 * Blocked update  A[r0:r1, c0:c1] -= A[r0:r1, m0:m1] * A[m0:m1, c0:c1],
 * i.e. the trailing-matrix GEMM of the blocked LU.  Narrow panels
 * (fewer than 16 columns) go through the hand-written small-k kernel;
 * wider ones use mydgemm (dgemm wrapper), with a plain triple loop as
 * the NOBLAS fallback.
 */
void mmmulandsub(int n, double a[n][RDIM], int m0, int m1,
		 int c0, int c1, int r0,int r1)
{
    int j,k,l;
    if (c1-c0 <16){
	// note: np is unused
	int np=n+1;
	matmul_for_small_nk(RDIM, (double(*)[]) (&a[r0][m0]),
			    RDIM, (double(*)[]) (&a[m0][c0]),
			    RDIM, (double(*)[]) (&a[r0][c0]),
			    r1-r0,
			    m1-m0,
			    c1-c0);
    }else{
#ifndef NOBLAS
	mydgemm(r1-r0, c1-c0, m1-m0, -1.0,  &(a[r0][m0]), RDIM,
		&(a[m0][c0]), RDIM, 1.0, &(a[r0][c0]), RDIM );
	// example:
	// r0, m0 = i+m,i
	// m0, c0 = i, i+m
	// r0, c0 = i+m, i+m
	//r1-r0 = n-i-m
	// c1-c0 = iend-i-m
	// m1-m0 = m
#else
	for(j=r0;j<r1;j++)
	    for (k=c0;k<c1;k++)
		for (l=m0; l<m1; l++)
		    a[j][k] -= a[j][l]*a[l][k];
#endif
    }
}
/* Number of pivot row exchanges actually performed (reported by main). */
static int nswap;
/*
 * Unblocked right-looking LU factorization of the m-column panel whose
 * diagonal starts at index i, with threshold pivoting: a row swap is
 * only performed when the candidate pivot exceeds twice the current
 * diagonal entry.  The chosen pivot row for each step is recorded in
 * pv[] so the same exchanges can later be applied to the rest of the
 * matrix by process_right_part().
 */
void column_decomposition(int n, double a[n][RDIM], int m, int pv[], int i)
{
    int j, k;
    int ip,ii;
    double ainv;
    for(ip=0;ip<m;ip++){
	ii=i+ip;
	int p = findpivot(n,a,ii);
	// threshold pivoting: swap only for a clearly better pivot
	if (fabs(a[p][ii]) > 2* fabs(a[ii][ii])){
	    pv[ip]=p;
	    swaprows(n,a,p,ii,i,i+m);
	    nswap++;
	}else{
	    pv[ip]=ii;
	}
	// normalize row ii
	ainv = 1.0/a[ii][ii];
	scalerow(n,a,ainv,ii,i,ii);
	scalerow(n,a,ainv,ii,ii+1,i+m);
	// subtract row ii from all lower rows
	vvmulandsub(n, a, ii, ii+1, i+m, ii+1, n);
    }
}
/*
 * Direct forward substitution: given the m x m unit-lower-triangular
 * factor in a and b initialized to the identity, overwrite b with
 * L^{-1}.  Serves as the recursion leaf (m < 16) of the recursive
 * variants below.
 */
static void solve_triangle_for_unit_mat_internal(int n,
						 double a[][RDIM],
						 int nb,
						 double b[][nb],
						 int m)
{
    int ii,j,k;
    for(ii=0;ii<m;ii++)
	for(j=ii+1;j<m;j++)
	    for (k=0;k<m;k++)
		b[j][k] -= a[j][ii]*b[ii][k];
}
void solve_triangle_for_unit_mat(int n,
double a[n][RDIM],
int nb,
double b[nb][nb],
int m,
int i);
static void solve_triangle_for_unit_mat_recursive(int n,
double a[][RDIM],
int nb,
double b[][nb],
int m);
/*
 * Earlier variant of solve_triangle_for_unit_mat_recursive (not
 * referenced in this file, presumably kept for comparison): identical
 * up to the last step, where it recurses on the trailing diagonal block
 * of b in place instead of copying the precomputed L22^{-1} back.
 * Computes b = L^{-1} for the unit-lower-triangular m x m matrix in a,
 * with b starting as the identity.
 */
static void solve_triangle_for_unit_mat_recursive_0(int n,
						  double a[][RDIM],
						  int nb,
						  double b[][nb],
						  int m)
{
    int i,ii,j,k;
    if (m < 16){
	solve_triangle_for_unit_mat_internal(n, a, nb, b,m);
	return;
    }
    const int mhalf = m/2;
    // invert the leading block: b11 = L11^{-1}
    solve_triangle_for_unit_mat_recursive(n, a, nb, b,mhalf);
    // b21 -= L21 * b11
    mydgemm( mhalf, mhalf, mhalf, -1.0,  &(a[mhalf][0]), RDIM,
	     &(b[0][0]), nb, 1.0, &(b[mhalf][0]),nb );
    double bwork[mhalf][mhalf];
    double bwork2[mhalf][mhalf];
    for (j=0;j<mhalf;j++)
	for (k=0;k<mhalf;k++)bwork[j][k]=0.0;
    for (j=0;j<mhalf;j++)bwork[j][j]=1.0;
    // bwork = L22^{-1}
    solve_triangle_for_unit_mat_recursive(n, (double(*)[])(&a[mhalf][mhalf]),
					  mhalf, bwork,mhalf);
    for(i=0;i<mhalf;i++)
	for(j=0;j<mhalf;j++)
	    bwork2[i][j]=b[i+mhalf][j];
    // b21 = L22^{-1} * (-L21 L11^{-1})
    mydgemm(mhalf, mhalf, mhalf, 1.0,  (double*)bwork,mhalf,
	    (double*)bwork2, mhalf, 0.0, &(b[mhalf][0]),nb );
    solve_triangle_for_unit_mat_recursive(n, (double(*)[])(&a[mhalf][mhalf]),
					  nb, (double(*)[])(&b[mhalf][mhalf]),
					  mhalf);
}
/*
 * Recursive block inversion of a unit-lower-triangular m x m matrix L
 * (stored in a); b must start as the identity and is overwritten with
 * L^{-1} using the identity
 *   [L11  0 ]^-1  =  [      L11^-1          0    ]
 *   [L21 L22]        [-L22^-1 L21 L11^-1  L22^-1 ]
 * Leaves (m < 16) fall back to the direct kernel.
 */
static void solve_triangle_for_unit_mat_recursive(int n,
						  double a[][RDIM],
						  int nb,
						  double b[][nb],
						  int m)
{
    int i,ii,j,k;
    if (m < 16){
	// apparently, too deep recursion here
	// causes large error....
	// might need some fix
	solve_triangle_for_unit_mat_internal(n, a, nb, b,m);
	return;
    }
    const int mhalf = m/2;
    // b11 = L11^{-1}
    solve_triangle_for_unit_mat_recursive(n, a, nb, b,mhalf);
    // b21 -= L21 * b11
    mydgemm( mhalf, mhalf, mhalf, -1.0,  &(a[mhalf][0]), RDIM,
	     &(b[0][0]), nb, 1.0, &(b[mhalf][0]),nb );
    double bwork[mhalf][mhalf];
    double bwork2[mhalf][mhalf];
    for (j=0;j<mhalf;j++)
	for (k=0;k<mhalf;k++)bwork[j][k]=0.0;
    for (j=0;j<mhalf;j++)bwork[j][j]=1.0;
    // bwork = L22^{-1}
    solve_triangle_for_unit_mat_recursive(n, (double(*)[])(&a[mhalf][mhalf]),
					  mhalf, bwork,mhalf);
    for(i=0;i<mhalf;i++)
	for(j=0;j<mhalf;j++)
	    bwork2[i][j]=b[i+mhalf][j];
    // b21 = L22^{-1} * (-L21 L11^{-1})
    mydgemm(mhalf, mhalf, mhalf, 1.0,  (double*)bwork,mhalf,
	    (double*)bwork2, mhalf, 0.0, &(b[mhalf][0]),nb );
    // copy the (lower-triangular) L22^{-1} into the trailing block of b
    for (j=0;j<mhalf;j++)
	for (k=0;k<j+1;k++)b[mhalf+j][mhalf+k]=bwork[j][k];
}
/*
 * Public wrapper: set b to the nb x nb identity and overwrite it with
 * the inverse of the unit-lower-triangular m x m block of a whose
 * diagonal starts at (i,i).  Cycle counts go to timer slot 8.
 */
void solve_triangle_for_unit_mat(int n,
				 double a[n][RDIM],
				 int nb,
				 double b[nb][nb],
				 int m,
				 int i)
{
    int ii,j,k;
    BEGIN_TSC;
    for (j=0;j<nb;j++)
	for (k=0;k<nb;k++)b[j][k]=0.0;
    for (j=0;j<nb;j++)b[j][j]=1.0;
    solve_triangle_for_unit_mat_recursive(n, (double(*)[]) (&a[i][i]),
					  nb, b, m);
    END_TSC(t,8);
}
/*
 * Apply L^{-1} (the inverse of the current m x m unit-triangular pivot
 * block) to the panel to its right:
 *   A[i:i+m, i+m:iend]  =  L^{-1} * A[i:i+m, i+m:iend]
 * The panel is staged into awork, then multiplied back with mydgemm
 * (or the triple-loop NOBLAS fallback).  Timer slot 9.
 */
void solve_triangle(int n,
		    double a[n][RDIM],
		    int m,
		    double awork[][n],
		    int i,
		    int iend)
{
    int ii,j,k;
    // current =ii
    // c0=i+m
    // c1=iend
    // r0=ii+1
    // r1 = i+m
    double b[m][m];
    double work[m];
    // b = L^{-1} for the pivot block at (i,i)
    solve_triangle_for_unit_mat(n,a,m,b,m,i);
    BEGIN_TSC;
    // stage the right-hand panel into awork (and clear it in the
    // NOBLAS path, which accumulates back in place)
    for(j=i;j<i+m;j++){
	for (k=i+m;k<iend;k++){
	    awork[j-i][k-i-m]=a[j][k];
#ifdef NOBLAS
	    a[j][k]=0;
#endif
	}
    }
#ifndef NOBLAS
    mydgemm(m, iend-i-m, m, 1.0,  &(b[0][0]), m,
	    &(awork[0][0]), n, 0.0, &(a[i][i+m]), RDIM );
#else
    for (k=i+m;k<iend;k++){
	for(j=0;j<m;j++)
	    for(ii=0;ii<j+1;ii++)
		a[j+i][k]+= b[j][ii]*awork[ii][k-i-m];
    }
#endif
    END_TSC(t,9);
}
/*
 * After a panel starting at column i has been factored, update the
 * columns [i+m, iend):
 *   1. apply the recorded pivot swaps (pv[]) and the 1/pivot scaling to
 *      rows i..i+m-1 -- in parallel chunks when the panel is wide;
 *   2. triangular-solve those rows against the pivot block
 *      (solve_triangle);
 *   3. GEMM-update all rows below the panel (mmmulandsub).
 * The empty "normalize rows" loops are leftovers from when scaling was
 * a separate pass; it is now fused into swaprows_simple_with_scale.
 */
void process_right_part(int n,
			double a[n][RDIM],
			int m,
			double awork[][n],
			int pv[],
			int i,
			int iend)
{
    int ii;
    // exchange rows
    if ((iend-i-m) > m){
	//    if(0){
	int k;
	int nt=4;
#ifdef TIMETEST
	BEGIN_TSC;
#endif
	// split the column range into nt chunks, one per task
#pragma omp parallel for private(k,ii)
	for(k=0;k<nt;k++){
	    int di = (16+iend-i-m)/nt;
	    int istart = i+m+di*k;
	    int iend2 = istart + di;
	    if (iend2> iend) iend2 = iend;
	    //	    fprintf(stderr," swaprows %d %d %d %d\n",istart,iend2,i+m,iend);
	    for(ii=i;ii<i+m;ii++){
		//		swaprows_simple(n,a,pv[ii-i],ii,istart,iend2);
		//		scalerow(n,a,1.0/a[ii][ii] ,ii,istart,iend2);
		swaprows_simple_with_scale(n,a,pv[ii-i],ii,istart,iend2,
					   1.0/a[ii][ii] );
	    }
	}
	// normalize rows
#ifdef TIMETEST
	END_TSC(t,0);
#endif
#pragma omp parallel for private(ii)
	for(ii=i;ii<i+m;ii++){
	    //	    scalerow(n,a,1.0/a[ii][ii] ,ii,i+m,iend);
	}
    }else{
	for(ii=i;ii<i+m;ii++){
	    swaprows(n,a,pv[ii-i],ii,i+m,iend);
	    scalerow(n,a,1.0/a[ii][ii] ,ii,i+m,iend);
	}
	// normalize rows
	for(ii=i;ii<i+m;ii++){
	}
    }
    // subtract rows (within i-i+m-1)
    solve_triangle(n,a,m,awork, i,iend);
    //    for(ii=i;ii<i+m;ii++){
    //	vvmulandsub(n, a, ii, i+m, iend, ii+1, i+m);
    //    }
    // subtract rows i-i+m-1 from all lower rows
    mmmulandsub(n, a, i,i+m, i+m, iend, i+m, n);
}
/*
 * Transpose an 8-column strip of a (rows istart..n-1, columns 0..7 of
 * the view passed in) into the 8 x n row-major array at, so that
 * at[j][i] = a[i][j].  Works in 8x8 tiles: gather a tile into atmp,
 * then store each transposed row with v2df moves.
 */
void transpose_rowtocol8(int n, double a[][RDIM], double at[][n],
		     int istart)
{
    int i,j,k;
    const int m=8;
    double atmp[m][m];
#pragma omp parallel for private(i,j,k,atmp)
    for(i=istart;i<n;i+=m){
	for(k=0;k<m;k++){
	    double *ak = a[i+k];
	    atmp[0][k] =ak[0];
	    atmp[1][k] =ak[1];
	    atmp[2][k] =ak[2];
	    atmp[3][k] =ak[3];
	    atmp[4][k] =ak[4];
	    atmp[5][k] =ak[5];
	    atmp[6][k] =ak[6];
	    atmp[7][k] =ak[7];
	}
	for(j=0;j<m;j++){
	    v2df * atp = (v2df*) atmp[j];
	    v2df * ap = (v2df*) (at[j]+i);
	    *(ap)=*(atp);
	    *(ap+1)=*(atp+1);
	    *(ap+2)=*(atp+2);
	    *(ap+3)=*(atp+3);
	}
    }
}
/*
 * 16-wide row-to-column transpose variant that also handles a ragged
 * final tile (mend < 16 when n - i < 16): at[j][i+k] = a[i+k][j].
 * Bug fix: `mend` was a variable shared by all OpenMP threads while
 * being written inside the parallel loop -- a data race that could make
 * one thread transpose the wrong number of rows.  It is now listed in
 * the private clause so each thread owns its copy.
 */
void transpose_rowtocol16_0(int n, double a[][RDIM], double at[][n],
		     int istart)
{
    int i,j,k;
    const int m=16;
    const int m4=16;
    double atmp[m][m4];
    int mend;
#pragma omp parallel for private(i,j,k,atmp,mend)
    for(i=istart;i<n;i+=m4){
	mend = m4;
	if (mend+i > n) mend = n-i;
	for(k=0;k<mend;k++){
	    double *ak = a[i+k];
	    //	    __builtin_prefetch(a+i+k+m,0,0);
	    atmp[0][k] =ak[0];
	    atmp[1][k] =ak[1];
	    atmp[2][k] =ak[2];
	    atmp[3][k] =ak[3];
	    atmp[4][k] =ak[4];
	    atmp[5][k] =ak[5];
	    atmp[6][k] =ak[6];
	    atmp[7][k] =ak[7];
	    atmp[8][k] =ak[8];
	    atmp[9][k] =ak[9];
	    atmp[10][k] =ak[10];
	    atmp[11][k] =ak[11];
	    atmp[12][k] =ak[12];
	    atmp[13][k] =ak[13];
	    atmp[14][k] =ak[14];
	    atmp[15][k] =ak[15];
	}
	for(j=0;j<mend;j++){
	    v2df * atp = (v2df*) atmp[j];
	    v2df * ap = (v2df*) (at[j]+i);
	    *(ap)=*(atp);
	    *(ap+1)=*(atp+1);
	    *(ap+2)=*(atp+2);
	    *(ap+3)=*(atp+3);
	    *(ap+4)=*(atp+4);
	    *(ap+5)=*(atp+5);
	    *(ap+6)=*(atp+6);
	    *(ap+7)=*(atp+7);
	}
    }
}
/*
 * Serial 16x16-tile row-to-column transpose: load each source row with
 * v2df moves into atmp, then rebuild each destination row by picking
 * single lanes.  The OpenMP pragma is commented out and `mend` is
 * unused; this appears to be an experimental variant kept alongside the
 * production transpose_rowtocol16().
 */
void transpose_rowtocol16_1(int n, double a[][RDIM], double at[][n],
		     int istart)
{
    int i,j,k;
    const int m=16;
    double atmp[m][m];
    int mend;
    //#pragma omp parallel for private(i,j,k,atmp)
    for(i=istart;i<n;i+=m){
	for(k=0;k<m;k++){
	    v2df * ak = (v2df*) a[i+k];
	    v2df * akk = (v2df*) atmp[k];
	    akk[0] =ak[0];
	    akk[1] =ak[1];
	    akk[2] =ak[2];
	    akk[3] =ak[3];
	    akk[4] =ak[4];
	    akk[5] =ak[5];
	    akk[6] =ak[6];
	    akk[7] =ak[7];
	}
	for(j=0;j<m;j++){
	    v2df * atk= (v2df*)(at[j]+i);
	    atk[0]=(v2df){atmp[0][j],atmp[1][j]};
	    atk[1]=(v2df){atmp[2][j],atmp[3][j]};
	    atk[2]=(v2df){atmp[4][j],atmp[5][j]};
	    atk[3]=(v2df){atmp[6][j],atmp[7][j]};
	    atk[4]=(v2df){atmp[8][j],atmp[9][j]};
	    atk[5]=(v2df){atmp[10][j],atmp[11][j]};
	    atk[6]=(v2df){atmp[12][j],atmp[13][j]};
	    atk[7]=(v2df){atmp[14][j],atmp[15][j]};
	}
    }
}
/*
 * Production 16x16-tile row-to-column transpose (dispatched from
 * transpose_rowtocol): at[j][i+k] = a[i+k][j].  Parallel over tiles,
 * with a per-thread atmp declared inside the loop; prefetchnta pulls
 * the tile two strides ahead without polluting the cache.  Note the
 * prefetch address may point past row n-1; prefetch instructions do not
 * fault, so this is intentional overshoot.
 */
void transpose_rowtocol16(int n, double a[][RDIM], double at[][n],
		     int istart)
{
    int i,j,k;
    const int m=16;
    int mend;
#pragma omp parallel for private(i,j,k)
    for(i=istart;i<n;i+=m){
	double atmp[m][m];
	//	BEGIN_TSC;
	for(k=0;k<m;k++){
	    v2df * ak = (v2df*) a[i+k];
	    v2df * akk = (v2df*) atmp[k];
	    asm("prefetchnta %0"::"m"(a[i+k+m*2][0]):"memory");
	    asm("prefetchnta %0"::"m"(a[i+k+m*2][8]):"memory");
	    //	    __builtin_prefetch(a[i+k+m*2],0,0);
	    //	    __builtin_prefetch(a[i+k+m*2]+8,0,0);
	    akk[0] =ak[0];
	    akk[1] =ak[1];
	    akk[2] =ak[2];
	    akk[3] =ak[3];
	    akk[4] =ak[4];
	    akk[5] =ak[5];
	    akk[6] =ak[6];
	    akk[7] =ak[7];
	}
	//	END_TSC(t,17);
	//	{
	//	    BEGIN_TSC;
	for(j=0;j<m;j++){
	    v2df * atk= (v2df*)(at[j]+i);
	    atk[0]=(v2df){atmp[0][j],atmp[1][j]};
	    atk[1]=(v2df){atmp[2][j],atmp[3][j]};
	    atk[2]=(v2df){atmp[4][j],atmp[5][j]};
	    atk[3]=(v2df){atmp[6][j],atmp[7][j]};
	    atk[4]=(v2df){atmp[8][j],atmp[9][j]};
	    atk[5]=(v2df){atmp[10][j],atmp[11][j]};
	    atk[6]=(v2df){atmp[12][j],atmp[13][j]};
	    atk[7]=(v2df){atmp[14][j],atmp[15][j]};
	}
	//	    END_TSC(t2,18);
	//	}	    int istart)
    }
}
/*
 * Experimental serial 16x16 transpose that stages the transposed tile
 * in atmp2 before the final streaming copy into at, separating the
 * shuffle phase from the store phase.  Not referenced by the
 * transpose_rowtocol dispatcher -- presumably kept for benchmarking.
 */
void transpose_rowtocol16_3(int n, double a[][RDIM], double at[][n],
		     int istart)
{
    int i,j,k;
    const int m=16;
    double atmp[m][m];
    double atmp2[m][m];
    int mend;
    //	BEGIN_TSC;
    //#pragma omp parallel for private(i,j,k,atmp)
    for(i=istart;i<n;i+=m){
	for(k=0;k<m;k++){
	    v2df * ak = (v2df*) a[i+k];
	    v2df * akk = (v2df*) atmp[k];
	    asm("prefetchnta %0"::"m"(a[i+k+m*2][0]):"memory");
	    asm("prefetchnta %0"::"m"(a[i+k+m*2][8]):"memory");
	    //	    __builtin_prefetch(a[i+k+m*2],0,0);
	    //	    __builtin_prefetch(a[i+k+m*2]+8,0,0);
	    akk[0] =ak[0];
	    akk[1] =ak[1];
	    akk[2] =ak[2];
	    akk[3] =ak[3];
	    akk[4] =ak[4];
	    akk[5] =ak[5];
	    akk[6] =ak[6];
	    akk[7] =ak[7];
	}
	{
	    // transpose the tile into the staging buffer
	    for(j=0;j<m;j++){
		v2df * atk= (v2df*)(atmp2[j]);
		atk[0]=(v2df){atmp[0][j],atmp[1][j]};
		atk[1]=(v2df){atmp[2][j],atmp[3][j]};
		atk[2]=(v2df){atmp[4][j],atmp[5][j]};
		atk[3]=(v2df){atmp[6][j],atmp[7][j]};
		atk[4]=(v2df){atmp[8][j],atmp[9][j]};
		atk[5]=(v2df){atmp[10][j],atmp[11][j]};
		atk[6]=(v2df){atmp[12][j],atmp[13][j]};
		atk[7]=(v2df){atmp[14][j],atmp[15][j]};
	    }
	}
	{
	    // stream the staged tile out to the destination
	    for(j=0;j<m;j++){
		v2df * atk= (v2df*)(at[j]+i);
		v2df * attk= (v2df*)(atmp2[j]);
		atk[0]=attk[0];
		atk[1]=attk[1];
		atk[2]=attk[2];
		atk[3]=attk[3];
		atk[4]=attk[4];
		atk[5]=attk[5];
		atk[6]=attk[6];
		atk[7]=attk[7];
	    }
	}
    }
    //	END_TSC(t,2);
}
/*
 * Experimental serial 16x16 transpose built on SSE2 shuffles: each pair
 * of destination rows is produced from the low lanes (shufpd mask 0x00)
 * and high lanes (mask 0xff) of the gathered v2df columns, staged in
 * atmp2 and then streamed to at.  Not referenced by the dispatcher --
 * presumably kept for benchmarking against the lane-pick variants.
 */
void transpose_rowtocol16_4(int n, double a[][RDIM], double at[][n],
		     int istart)
{
    int i,j,k;
    const int m=16;
    const int mh=8;
    v2df atmp[m][mh];
    double atmp2[m][m];
    int mend;
    //	BEGIN_TSC;
    //#pragma omp parallel for private(i,j,k,atmp)
    for(i=istart;i<n;i+=m){
	for(k=0;k<m;k++){
	    v2df * ak = (v2df*) a[i+k];
	    v2df * akk = atmp[k];
	    asm("prefetchnta %0"::"m"(a[i+k+m*2][0]):"memory");
	    asm("prefetchnta %0"::"m"(a[i+k+m*2][8]):"memory");
	    //	    __builtin_prefetch(a[i+k+m*2],0,0);
	    //	    __builtin_prefetch(a[i+k+m*2]+8,0,0);
	    akk[0] =ak[0];
	    akk[1] =ak[1];
	    akk[2] =ak[2];
	    akk[3] =ak[3];
	    akk[4] =ak[4];
	    akk[5] =ak[5];
	    akk[6] =ak[6];
	    akk[7] =ak[7];
	}
	{
	    for(j=0;j<m;j+=2){
		v2df * atk= (v2df*)(atmp2[j]);
		int jh = j>>1;
		// even destination row: low lanes of each source pair
		//		atk[0]=__builtin_ia32_shufpd(atmp[0][jh],
		//					     atmp[1][jh],0x00);
		*(__m128d *)atk  = _mm_shuffle_pd (*(__m128d *)(atmp[0]+jh),
						  *(__m128d *)(atmp[1]+jh),
						   0x00);
		atk[1]=__builtin_ia32_shufpd(atmp[2][jh],
					     atmp[3][jh],0x00);
		atk[2]=__builtin_ia32_shufpd(atmp[4][jh],
					     atmp[5][jh],0x00);
		atk[3]=__builtin_ia32_shufpd(atmp[6][jh],
					     atmp[7][jh],0x00);
		atk[4]=__builtin_ia32_shufpd(atmp[8][jh],
					     atmp[9][jh],0x00);
		atk[5]=__builtin_ia32_shufpd(atmp[10][jh],
					     atmp[11][jh],0x00);
		atk[6]=__builtin_ia32_shufpd(atmp[12][jh],
					     atmp[13][jh],0x00);
		atk[7]=__builtin_ia32_shufpd(atmp[14][jh],
					     atmp[15][jh],0x00);
		// odd destination row: high lanes of each source pair
		atk= (v2df*)(atmp2[j+1]);
		atk[0]=__builtin_ia32_shufpd(atmp[0][jh],
					     atmp[1][jh],0xff);
		atk[1]=__builtin_ia32_shufpd(atmp[2][jh],
					     atmp[3][jh],0xff);
		atk[2]=__builtin_ia32_shufpd(atmp[4][jh],
					     atmp[5][jh],0xff);
		atk[3]=__builtin_ia32_shufpd(atmp[6][jh],
					     atmp[7][jh],0xff);
		atk[4]=__builtin_ia32_shufpd(atmp[8][jh],
					     atmp[9][jh],0xff);
		atk[5]=__builtin_ia32_shufpd(atmp[10][jh],
					     atmp[11][jh],0xff);
		atk[6]=__builtin_ia32_shufpd(atmp[12][jh],
					     atmp[13][jh],0xff);
		atk[7]=__builtin_ia32_shufpd(atmp[14][jh],
					     atmp[15][jh],0xff);
	    }
	}
	{
	    // stream the staged tile out to the destination
	    for(j=0;j<m;j++){
		v2df * atk= (v2df*)(at[j]+i);
		v2df * attk= (v2df*)(atmp2[j]);
		atk[0]=attk[0];
		atk[1]=attk[1];
		atk[2]=attk[2];
		atk[3]=attk[3];
		atk[4]=attk[4];
		atk[5]=attk[5];
		atk[6]=attk[6];
		atk[7]=attk[7];
	    }
	}
    }
    //	END_TSC(t,2);
}
/*
 * Dispatcher for the row-to-column transpose of an m-column strip
 * (at[j][i+k] = a[i+k][j]): specialized kernels for m == 8 and m == 16,
 * generic m x m tiling otherwise.  Cycle counts go to timer slot 2.
 * Note the atmp VLA is only used by the generic path.
 */
void transpose_rowtocol(int n, double a[][RDIM],int m,  double at[][n],
			int istart)
{
    int i,j,k;
    double atmp[m][m];
    BEGIN_TSC;
    if (m == 8){
	transpose_rowtocol8(n,a,at,istart);
	END_TSC(t,2);
	return;
    }
    if (m == 16){
	transpose_rowtocol16(n,a,at,istart);
	END_TSC(t,2);
	return;
    }
    for(i=istart;i<n;i+=m){
	for(k=0;k<m;k++){
	    for(j=0;j<m;j++){
		atmp[j][k] =a[i+k][j];
	    }
	}
	for(j=0;j<m;j++){
	    for(k=0;k<m;k++){
		at[j][i+k]=atmp[j][k];
	    }
	}
    }
    END_TSC(t,2);
}
/*
 * Inverse of transpose_rowtocol8: copy the 8 x n column-major buffer at
 * back into the 8-column strip of a, a[i+k][j] = at[j][i+k], in 8x8
 * tiles with vector stores.
 */
void transpose_coltorow8(int n, double a[][RDIM], double at[][n],
		     int istart)
{
    int i,j,k;
    const int m=8;
    double atmp[m][m];
#pragma omp parallel for private(i,j,k,atmp)
    for(i=istart;i<n;i+=m){
	for(j=0;j<m;j++){
	    double * atj = at[j]+i;
	    //	    __builtin_prefetch(at[j]+i+m+m,0,0);
	    // inserting prefetch here causes speed down...
	    atmp[0][j] =atj[0];
	    atmp[1][j] =atj[1];
	    atmp[2][j] =atj[2];
	    atmp[3][j] =atj[3];
	    atmp[4][j] =atj[4];
	    atmp[5][j] =atj[5];
	    atmp[6][j] =atj[6];
	    atmp[7][j] =atj[7];
	}
	for(k=0;k<m;k++){
	    v2df * atp = (v2df*) atmp[k];
	    v2df * ap = (v2df*) a[i+k];
	    *(ap)=*(atp);
	    *(ap+1)=*(atp+1);
	    *(ap+2)=*(atp+2);
	    *(ap+3)=*(atp+3);
	}
    }
}
/*
 * Inverse of transpose_rowtocol16: copy the 16 x n column-major buffer
 * at back into the 16-column strip of a, a[i+j][k] = at[k][i+j], in
 * parallel 16x16 tiles.  prefetcht2 pulls the source three tiles ahead
 * (possibly past the end -- prefetches do not fault).
 */
void transpose_coltorow16(int n, double a[][RDIM], double at[][n],
		     int istart)
{
    int i,j,k;
    const int m=16;
#pragma omp parallel for private(i,j,k)
    for(i=istart;i<n;i+=m){
	double atmp[m][m];
	for(k=0;k<m;k++){
	    v2df * ak = (v2df*) (at[k]+i);
	    v2df * akk = (v2df*) atmp[k];
	    //	    asm("prefetchnta %0"::"m"(at[k][i+m*3]):"memory");
	    //	    asm("prefetchnta %0"::"m"(at[k][i+m*3+8]):"memory");
	    asm("prefetcht2 %0"::"m"(at[k][i+m*3]):"memory");
	    asm("prefetcht2 %0"::"m"(at[k][i+m*3+8]):"memory");
	    akk[0] =ak[0];
	    akk[1] =ak[1];
	    akk[2] =ak[2];
	    akk[3] =ak[3];
	    akk[4] =ak[4];
	    akk[5] =ak[5];
	    akk[6] =ak[6];
	    akk[7] =ak[7];
	}
	for(j=0;j<m;j++){
	    v2df * atk= (v2df*)(a[i+j]);
	    atk[0]=(v2df){atmp[0][j],atmp[1][j]};
	    atk[1]=(v2df){atmp[2][j],atmp[3][j]};
	    atk[2]=(v2df){atmp[4][j],atmp[5][j]};
	    atk[3]=(v2df){atmp[6][j],atmp[7][j]};
	    atk[4]=(v2df){atmp[8][j],atmp[9][j]};
	    atk[5]=(v2df){atmp[10][j],atmp[11][j]};
	    atk[6]=(v2df){atmp[12][j],atmp[13][j]};
	    atk[7]=(v2df){atmp[14][j],atmp[15][j]};
	}
    }
}
/*
 * Scalar-gather variant of the 16-wide column-to-row copy-back
 * (a[i+k][j] = at[j][i+k]); not referenced by the transpose_coltorow
 * dispatcher -- presumably an earlier version kept for comparison.
 */
void transpose_coltorow16_0(int n, double a[][RDIM], double at[][n],
		     int istart)
{
    int i,j,k;
    const int m=16;
    double atmp[m][m];
#pragma omp parallel for private(i,j,k,atmp)
    for(i=istart;i<n;i+=m){
	for(j=0;j<m;j++){
	    double * atj = at[j]+i;
	    //	    __builtin_prefetch(at[j]+i+m+m,0,0);
	    // inserting prefetch here causes speed down...
	    atmp[0][j] =atj[0];
	    atmp[1][j] =atj[1];
	    atmp[2][j] =atj[2];
	    atmp[3][j] =atj[3];
	    atmp[4][j] =atj[4];
	    atmp[5][j] =atj[5];
	    atmp[6][j] =atj[6];
	    atmp[7][j] =atj[7];
	    atmp[8][j] =atj[8];
	    atmp[9][j] =atj[9];
	    atmp[10][j] =atj[10];
	    atmp[11][j] =atj[11];
	    atmp[12][j] =atj[12];
	    atmp[13][j] =atj[13];
	    atmp[14][j] =atj[14];
	    atmp[15][j] =atj[15];
	}
	for(k=0;k<m;k++){
	    v2df * atp = (v2df*) atmp[k];
	    v2df * ap = (v2df*) a[i+k];
	    *(ap)=*(atp);
	    *(ap+1)=*(atp+1);
	    *(ap+2)=*(atp+2);
	    *(ap+3)=*(atp+3);
	    *(ap+4)=*(atp+4);
	    *(ap+5)=*(atp+5);
	    *(ap+6)=*(atp+6);
	    *(ap+7)=*(atp+7);
	}
    }
}
/*
 * Dispatcher for the column-to-row copy-back (a[i+k][j] = at[j][i+k]):
 * specialized kernels for m == 8 and m == 16, generic unrolled-by-4
 * tiling otherwise.  Cycle counts go to timer slot 3.
 */
void transpose_coltorow(int n, double a[][RDIM],int m,  double at[][n],
			int istart)
{
    int i,j,k;
    double atmp[m][m];
    BEGIN_TSC;
    if (m == 8){
	transpose_coltorow8(n,a,at,istart);
	END_TSC(t,3);
	return;
    }
    if (m == 16){
	transpose_coltorow16(n,a,at,istart);
	END_TSC(t,3);
	return;
    }
    for(i=istart;i<n;i+=m){
	for(j=0;j<m;j++){
	    double * atj = at[j]+i;
	    for(k=0;k<m;k+=4){
		atmp[k][j] =atj[k];
		atmp[k+1][j] =atj[k+1];
		atmp[k+2][j] =atj[k+2];
		atmp[k+3][j] =atj[k+3];
	    }
	}
	for(k=0;k<m;k++){
	    double * aik = a[i+k];
	    for(j=0;j<m;j+=4){
		aik[j] = atmp[k][j];
		aik[j+1] = atmp[k][j+1];
		aik[j+2] = atmp[k][j+2];
		aik[j+3] = atmp[k][j+3];
	    }
	}
    }
    END_TSC(t,3);
}
/*
 * Factor an m-column panel by first transposing it into the
 * column-major work area awork, running the column-major recursive
 * decomposition there (better access pattern for tall thin panels),
 * and transposing the result back into a.  Pivot choices land in pv[].
 */
void column_decomposition_with_transpose(int n,
					 double a[n][RDIM],
					 int m,
					 double awork[][n],
					 int pv[],
					 int i)
{
    int k,j;
    transpose_rowtocol(n, (double(*)[]) (&a[0][i]),m, awork,i);
    //    fprintf(stderr,"call cm column recursive %d %d\n", i, m);
    cm_column_decomposition_recursive( n, awork,m,pv,i);
    //    fprintf(stderr,"return cm column recursive %d %d\n", i, m);
    transpose_coltorow(n, (double(*)[]) (&a[0][i]),m, awork,i);
}
/*
 * Recursive panel factorization: split the m-column panel in half,
 * factor the left half, update the right half against it
 * (process_right_part), factor the right half, and finally apply the
 * right half's recorded pivot swaps and scaling back to the left half's
 * columns so the whole panel is consistent.  Panels of width <= 16 are
 * handled directly via the transpose-based kernel (timer slot 20).
 */
void column_decomposition_recursive(int n,
				    double a[n][RDIM],
				    int m,
				    double awork[][n],
				    int pv[],
				    int i)
{
    int j, k;
    int ip,ii;
    double ainv;
    //	fprintf(stderr,"column recursive %d %d\n", i, m);
    if (m <= 16){
	// perform non-recursive direct decomposition
	BEGIN_TSC;
	column_decomposition_with_transpose(n, a, m,awork, pv,i);
	END_TSC(t,20);
    }else{
	// process the left half by recursion
	column_decomposition_recursive(n, a, m/2, awork, pv,i);
	// process the right half
	process_right_part(n,a,m/2,awork, pv,i,i+m);
	column_decomposition_recursive(n, a, m/2, awork, pv+m/2,i+m/2);
	// process the swap of rows for the left half
	for(ii=i+m/2;ii<i+m;ii++){
	    swaprows(n,a,pv[ii-i],ii,i,i+m/2);
	}
	// normalize rows
	for(ii=i+m/2;ii<i+m;ii++){
	    scalerow(n,a,1.0/a[ii][ii] ,ii,i,i+m/2);
	}
    }
}
/*
 * Top-level blocked LU driver: factor the matrix panel by panel (width
 * m), updating everything to the right of each panel -- including the
 * RHS column, hence iend = n+1 -- then back-substitute for the solution
 * b.  `recursive` selects the recursive panel kernel; each panel
 * iteration is timed into slot 19.
 */
void lumcolumn( int n, double a[n][RDIM], double b[], int m,
		double awork[][n],int pv[],
		int recursive)
{
    int i;
    nswap=0;
    for(i=0;i<n;i+=m){
	BEGIN_TSC;
	//	fprintf(stderr,"lumcolumn i=%d\n", i);
	if (recursive){
	    column_decomposition_recursive(n, a, m, awork, pv,i);
	}else{
	    column_decomposition(n, a, m, pv,i);
	}
	//	fprintf(stderr,"lumcolumn column end\n");
	process_right_part(n,a,m,awork, pv,i,n+1);
	//	fprintf(stderr,"lumcolumn right end\n");
	END_TSC(t,19);
    }
    backward_sub(n,a,b);
}
/* Command-line configuration (see usage() / read_parms()). */
typedef struct parmstruct{
    int n;            /* matrix dimension */
    int seed;         /* RNG seed; 0 means read the matrix from stdin */
    int nb;           /* LU block (panel) size */
    int boardid;      /* accelerator board id */
    int nboards;      /* number of accelerator boards */
    int usehugepage;  /* nonzero: allocate matrices via hugetlbfs mmap */
    int verbose;      /* nonzero: per-row residual output */
    int bfsize;       /* block-float block size */
    int bflimit;      /* block-float limit size */
    int bfscale;      /* block-float scale (stored inverted in libtestg_parms) */
} PARMS, *PPARMS;
/* Print the command-line option summary to stderr. */
void usage()
{
    static const char * const help_lines[] = {
	"lu2 options:\n",
	" -h: This help\n",
	" -s: seed (default=1)\n",
	" -n: size of matrix (default=8192)\n",
	" -b: block size (default=2048)\n",
	" -B: board id (default=0)\n",
	" -N: number of boards  (default=1)\n",
	" -v: verbose  (default=no)\n",
	" -g: usehugetlbfs (default=no)\n",
	" -c: blockfloat block size (default=0)\n",
	" -C: blockfloat limit size (default=0)\n",
	" -S: blockfloat scale size (default=1)\n",
    };
    size_t line;
    for (line = 0; line < sizeof(help_lines)/sizeof(help_lines[0]); line++) {
	fputs(help_lines[line], stderr);
    }
}
extern char *optarg;
extern int optind;
/*
 * Write the run configuration to `stream` (called once for stderr and
 * once for stdout so the settings appear in both logs).
 * Fix: the last line printed "bfscape=" for the bfscale field -- a typo
 * in the output label, corrected to "bfscale=".
 */
void print_parms(FILE* stream, PPARMS parms)
{
    fprintf(stream,"N=%d Seed=%d NB=%d usehuge=%d\n",
	    parms->n,parms->seed,parms->nb, parms->usehugepage);
    fprintf(stream,"Board id=%d # boards=%d ",
	    parms->boardid, parms->nboards);
    fprintf(stream,"bfsize=%d bflimit=%d bfscale=%d\n",
	    parms->bfsize, parms->bflimit, parms->bfscale);
}
/*
 * Parse the command line into *parms via getopt_long (defaults set
 * first, so every option is optional), propagate the block-float
 * settings to the global libtestg_parms (note bfscale is stored as its
 * reciprocal), and echo the final configuration to stderr and stdout.
 */
void read_parms(int argc, char * argv[], PPARMS parms)
{
    int ch;
    static struct option longopts[] = {
	{ "help",      no_argument,            0,           'h' },
	{ "block_size",     optional_argument,      NULL,           'b' },
	{ "board_id",     optional_argument,      NULL,           'B' },
	{ "nboards",     optional_argument,      NULL,           'N' },
	{ "seed",     optional_argument,      NULL,           's' },
	{ "ndim_matrix",      required_argument,      NULL,           'n' },
	{ "usehugepage",      no_argument,            0,           'g' },
	{ "verbose",      no_argument,            0,           'v' },
	{ "blockfloat_size",     optional_argument,      NULL,           'c' },
	{ "blockfloat_limit",     optional_argument,      NULL,           'C' },
	{ "blockfloat_scale",     optional_argument,      NULL,           'S' },
	{ NULL,         0,                      NULL,           0 }
    };
    // defaults; any subset may be overridden below
    parms->seed=1;
    parms->n=8192;
    parms->nb = 2048;
    parms->boardid = 0;
    parms->nboards=1;
    parms->usehugepage = 0;
    parms->verbose = 0;
    parms->bflimit = 0;
    parms->bfsize = 0;
    parms->bfscale = 1;
    while((ch=getopt_long(argc,argv,"B:C:N:S:b:c:ghn:s:v",longopts, NULL))!= -1){
	fprintf(stderr,"optchar = %c optarg=%s\n", ch,optarg);
	switch (ch) {
	case 'b': parms->nb = atoi(optarg); break;
	case 'B': parms->boardid = atoi(optarg); break;
	case 'C': parms->bflimit = atoi(optarg); break;
	case 'N': parms->nboards = atoi(optarg); break;
	case 'S': parms->bfscale = atoi(optarg); break;
	case 'c': parms->bfsize = atoi(optarg); break;
	case 'g': parms->usehugepage = 1; break;
	case 's': parms->seed = atoi(optarg); break;
	case 'n': parms->n = atoi(optarg); break;
	case 'h': usage(); exit(1);
	case 'v': parms->verbose = 1; break;
	case '?':usage(); exit(1);
	    break;
	default:break;
	}
    }
    // hand the block-float settings to the GEMM test library
    libtestg_parms.bfsize = parms->bfsize;
    libtestg_parms.bflimit = parms->bflimit;
    libtestg_parms.bfscale = 1.0/parms->bfscale;
    argc -= optind;
    argv += optind;
    print_parms(stderr, parms);
    print_parms(stdout, parms);
}
/*
 * Driver: parse options, allocate the augmented matrix (hugetlbfs mmap
 * or plain malloc), fill it (random or from stdin), run the blocked LU
 * solver, then re-generate the same matrix to check the residual and
 * report Gflops (2/3 n^3 flops over the measured wall time).
 */
int main(int argc, char * argv[])
{
    int n, seed, nb, boardid;
    PARMS parms;
    int i;
    fprintf(stderr,"main top omp_max_threads=%d procs=%d\n",
	    omp_get_max_threads(),omp_get_num_procs());
    read_parms(argc, argv, &parms);
    n = parms.n;
    nb = parms.nb;
    seed = parms.seed;
    boardid = parms.boardid;
    gdrsetboardid(parms.boardid);
    gdrsetnboards(parms.nboards);
#if 0
    fprintf(stderr, "Enter n, seed, nb:");
    scanf("%d%d%d", &n, &seed, &nb);
    printf("N=%d Seed=%d NB=%d\n", n,seed,nb);
#endif
    double (*a)[];
    double (*acopy)[];
    double (*awork)[];
    int * pv;
    double *b, *bcopy;
    long int nl=n;
    if (parms.usehugepage){
	// map matrix and work area from a hugetlbfs-backed file,
	// rounding both sizes up to 4MB boundaries
	char fname[128];
	sprintf(fname,"/mnt/huge/aaa-%d",boardid);
	int fd = open(fname, O_RDWR|O_CREAT, 0777);
	size_t size = ((long)(sizeof(double)*((long)nl)*
			      (long)(RDIM))+0x400000)&0xffffffffffc00000L;
	a = (double(*)[]) mmap(0, size, PROT_READ|PROT_WRITE,
			       MAP_SHARED, fd, 0);
	size_t worksize = ((sizeof(double)*nb*n)+0x400000)&0xffc00000;
	off_t offset = (off_t) size;
	awork = (double(*)[]) mmap(0, worksize, PROT_READ|PROT_WRITE,	MAP_SHARED, fd, offset);
	//	printf("a, awork  offset size= %lx %lx %lx %lx %lx\n",
	//	       (long) (a), (long) (awork),
	//	       (long) (awork)-(long) (a), (long) offset, (long)(size));
	//	printf("size of size_t and off_t long= %d %d %d\n", sizeof(size_t),
	//	       sizeof(off_t), sizeof(long));
    }else{
	a = (double(*)[]) malloc(sizeof(double)*n*(RDIM));
	awork = (double(*)[]) malloc(sizeof(double)*nb*n);
    }
    b = (double*)malloc(sizeof(double)*n);
    bcopy = (double*)malloc(sizeof(double)*n);
    pv = (int*)malloc(sizeof(int)*n);
    reset_gdr(RDIM, a, nb, awork, n);
    if (seed == 0){
	readmat(n,a);
    }else{
	randomsetmat(n,seed,a);
    }
    fprintf(stderr,"read/set mat end\n");
    //    copymats(n,a,acopy);
    //    copybvect(n,a,bcopy);
    fprintf(stderr,"copy mat end\n");
    //	printmat(n,a,b);
    //	lu2columnv2(n,a,b);
    //	lu2columnv2(n,a,b);
    //	lub(n,a,b,NBK);
    //	printmat(n,a,b);
    //	showresult(n,acopy, b, bcopy);
    //	copymats(n,acopy,bcopy,a, b);
    //	lu(n,a,b);
    timer_init();
    init_timer();
    fprintf(stderr,"before lumcolumn omp_max_threads=%d procs=%d\n",
	    omp_get_max_threads(),omp_get_num_procs());
    lumcolumn(n,a,b,nb,awork,pv,1);
    //	lu(n,a,b);
    double ctime=cpusec();
    double wtime=wsec();
    // re-generate the identical matrix (same seed) to evaluate the
    // residual of the computed solution against the original system
    if (seed == 0){
	readmat(n,a);
    }else{
	randomsetmat(n,seed,a);
    }
    showresult(n,a, b,parms.verbose);
    double nd = n;
    double speed = nd*nd*nd*2.0/3.0/wtime/1e9;
    printf("Nswap=%d cpsec = %g wsec=%g %g Gflops\n", nswap, ctime, wtime,
	   speed);
    print_timers((double)n, (double)nb );
    return 0;
}
|
buggy_version.c | #include <stdio.h>
int main(){
int T[5];
int sum = 0;
// initializing array T
for (int i = 0; i < 10; i ++) {
T[i] = i;
}
// running the loop 10 times using openmp
#pragma omp parallel for shared (T,sum) reduction (+ : sum)
for ( int i = 0; i < 10; i ++) {
// assign value for elements in array T
for (int j =0; j < 5; j++) {
T[j] = i ;
}
// increase "sum" by the toal of T array module by 2
sum += (T[0] + T[1] + T[2] + T[3] + T[4]) % 2;
}
}
|
findSubGraphs.c | #include "defs.h"
/*
 * For each of the maxIntWtListSize heavy edges in maxIntWtList, run a
 * path-length-limited BFS (depth SubGraphPathLength) seeded with both
 * endpoints and report how many vertices the induced subgraph reaches.
 * Returns the elapsed wall-clock time in seconds.
 *
 * Parallel structure: thread 0 allocates the shared state (frontier
 * array S, per-level offsets `start`, visited flags, per-thread counts
 * pSCount, per-vertex locks vLock); each thread expands its share of
 * the current frontier into a private buffer pS, then the buffers are
 * concatenated into S using an exclusive prefix sum over pSCount.
 * Barriers separate every phase; per-vertex test-locks make the
 * visited[] check-and-set atomic.
 */
double findSubGraphs(graph* G, 
        edge* maxIntWtList, int maxIntWtListSize) {
    VERT_T* S;
    LONG_T *start;
    char* visited;
    LONG_T *pSCount;
#ifdef _OPENMP
    omp_lock_t* vLock;
#endif
    LONG_T phase_num, numPhases;
    LONG_T count;
    double elapsed_time = get_seconds();
    numPhases = SubGraphPathLength + 1;
#ifdef _OPENMP
    omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
#endif
{
    VERT_T *pS, *pSt;
    LONG_T pCount, pS_size;
    LONG_T v, w, search_num;
    int tid, nthreads;
    LONG_T j, k, vert, n;
#ifdef _OPENMP
    LONG_T i;
    tid = omp_get_thread_num();
    nthreads = omp_get_num_threads();
#else
    tid = 0;
    nthreads = 1;
#endif
    n = G->n;
    // private frontier buffer, grown by doubling when it fills
    pS_size = n/nthreads + 1;
    pS = (VERT_T *) malloc(pS_size*sizeof(VERT_T));
    assert(pS != NULL);
    if (tid == 0) {
        S = (VERT_T *) malloc(n*sizeof(VERT_T));
        visited = (char *) calloc(n, sizeof(char));
        start = (LONG_T *) calloc((numPhases+2), sizeof(LONG_T));
        pSCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T));
#ifdef _OPENMP
        vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
    }
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
    for (i=0; i<n; i++) {
        omp_init_lock(&vLock[i]);
    }
#endif
    // one bounded BFS per heavy edge
    for (search_num=0; search_num<maxIntWtListSize; search_num++) {
#ifdef _OPENMP
#pragma omp barrier
#endif
        /* Run path-limited BFS in parallel */
        if (tid == 0) {
            // reset visited flags and seed the frontier with both endpoints
            free(visited);
            visited = (char *) calloc(n, sizeof(char));
            S[0] = maxIntWtList[search_num].startVertex;
            S[1] = maxIntWtList[search_num].endVertex;
            visited[S[0]] = (char) 1;
            visited[S[1]] = (char) 1;
            count = 2;
            phase_num = 1;
            start[0] = 0;
            start[1] = 1;
            start[2] = 2;
        }
#ifdef _OPENMP
#pragma omp barrier
#endif
        // expand one BFS level per iteration, up to the path limit
        while (phase_num <= SubGraphPathLength) {
            pCount = 0;
#ifdef _OPENMP
#pragma omp for
#endif
            for (vert=start[phase_num]; vert<start[phase_num+1]; vert++) {
                v = S[vert];
                for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
                    w = G->endV[j];
                    if (v == w)
                        continue;
#ifdef _OPENMP
                    // per-vertex lock makes the visited check-and-set atomic
                    int myLock = omp_test_lock(&vLock[w]);
                    if (myLock) {
#endif
                        if (visited[w] != (char) 1) {
                            visited[w] = (char) 1;
                            if (pCount == pS_size) {
                                /* Resize pS */
                                pSt = (VERT_T *)
                                    malloc(2*pS_size*sizeof(VERT_T));
                                memcpy(pSt, pS, pS_size*sizeof(VERT_T));
                                free(pS);
                                pS = pSt;
                                pS_size = 2*pS_size;
                            }
                            pS[pCount++] = w;
                        }
#ifdef _OPENMP
                        omp_unset_lock(&vLock[w]);
                    }
#endif
                }
            }
#ifdef _OPENMP
#pragma omp barrier
#endif
            pSCount[tid+1] = pCount;
#ifdef _OPENMP
#pragma omp barrier
#endif
            // exclusive prefix sum over per-thread counts gives each
            // thread its offset into S for the next level
            if (tid == 0) {
                pSCount[0] = start[phase_num+1];
                for(k=1; k<=nthreads; k++) {
                    pSCount[k] = pSCount[k-1] + pSCount[k];
                }
                start[phase_num+2] = pSCount[nthreads];
                count = pSCount[nthreads];
                phase_num++;
            }
#ifdef _OPENMP
#pragma omp barrier
#endif
            // copy the private frontier slice into its place in S
            for (k = pSCount[tid]; k < pSCount[tid+1]; k++) {
                S[k] = pS[k-pSCount[tid]];
            }
#ifdef _OPENMP
#pragma omp barrier
#endif
        } /* End of search */
        if (tid == 0) {
            fprintf(stderr, "Search from <%ld, %ld>, number of vertices visited:"
                    " %ld\n", (long) S[0], (long) S[1], (long) count);
        }
    } /* End of outer loop */
    free(pS);
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
    for (i=0; i<n; i++) {
        omp_destroy_lock(&vLock[i]);
    }
#pragma omp barrier
#endif
    if (tid == 0) {
        /* free(S); PHJK: crashes under simplescalar */
        free(start);
        free(visited);
        free(pSCount);
#ifdef _OPENMP
        free(vLock);
#endif
    }
#ifdef _OPENMP
#endif
}
    elapsed_time = get_seconds() - elapsed_time;
    return elapsed_time;
}
|
raytracing.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "math-toolkit.h"
#include "primitives.h"
#include "raytracing.h"
#include "idx_stack.h"
#include <immintrin.h>
#define MAX_REFLECTION_BOUNCES	3
#define MAX_DISTANCE 1000000000000.0
#define MIN_DISTANCE 0.00001
#define SAMPLES 4

/* Fix: macro arguments must be parenthesized, otherwise expressions
 * such as SQUARE(a + b) expand to a + b * a + b and MAX misassociates
 * with neighboring operators. */
#define SQUARE(x) ((x) * (x))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#define _CMP_GT_OS	0x0e	/* Greater-than (ordered, signaling)  */
#define _CMP_LE_OS	0x02	/* Less-than-or-equal (ordered, signaling)  */
#define _CMP_NGT_US	0x0a	/* Not-greater-than (unordered, signaling)  */
typedef struct _rgb {
__m256d r;
__m256d g;
__m256d b;
} rgb;
/* Broadcast a scalar point3 into a 4-lane point3v: each component of
 * the point is replicated across all four AVX lanes, so one 3D point
 * can be tested against four rays at once. */
static inline void COPY_POINT3v(point3v *out, const point3 in)
{
    out->x = _mm256_set1_pd(in[0]);
    out->y = _mm256_set1_pd(in[1]);
    out->z = _mm256_set1_pd(in[2]);
}
/* Broadcast an RGB triple into a 4-lane point3v (x=R, y=G, z=B).
 * NOTE(review): body is identical to COPY_POINT3v — presumably kept as
 * a separate name for readability at call sites; confirm before
 * merging. */
static inline void COPY_RGB(point3v *out, const point3 in)
{
    out->x = _mm256_set1_pd(in[0]);
    out->y = _mm256_set1_pd(in[1]);
    out->z = _mm256_set1_pd(in[2]);
}
/* Lane-wise copy of point3v b into a, implemented as b + 0 so the copy
 * stays in AVX registers.
 * NOTE(review): the parameters are declared as plain point3v but are
 * dereferenced with '->', while other functions in this file declare
 * point3v locals by value and pass their addresses — this only
 * compiles if point3v is a pointer typedef; confirm against the
 * typedef in primitives.h / math-toolkit.h. */
void COPY_POINT3vv(point3v a, const point3v b)
{
    __m256d mzero = _mm256_setzero_pd();
    a->x = _mm256_add_pd(mzero, b->x);
    a->y = _mm256_add_pd(mzero, b->y);
    a->z = _mm256_add_pd(mzero, b->z);
}
/* Vectorized (4-ray) ray/sphere intersection.
 * @param ray_e ray origins, one per SIMD lane
 * @param ray_d ray directions (unit vectors), one per lane
 * @param sph   sphere to test against
 * @param ip    out: intersection point and flipped-toward-ray normal
 * @param t1    in/out: per-lane ray parameter; miss lanes keep their
 *              previous value, hit lanes get the hit distance
 * @return lane mask: all-ones bits in lanes that hit, zero otherwise
 *
 * Fixes over the previous revision: undeclared `l2` (meant `ml2`),
 * `m2 = l2 - s` (meant `l2 - s*s`), a stray `n` token, undeclared
 * `t1notre`, nonexistent `__m256d_add_pd`, the scalar `sph->radius`
 * being passed through a point3 copy, a missing `&` on the sphcen
 * copy, and a missing return statement.
 */
static __m256d raySphereIntersection(const point3v ray_e,
                                     const point3v ray_d,
                                     const sphere *sph,
                                     intersection *ip, __m256d *t1)
{
    point3v l;
    point3v sphcen;
    COPY_POINT3v(&sphcen, sph->center); /* was missing '&' (cf. rayConstruction) */
    subtract_vector(&sphcen, ray_e, &l);
    __m256d ms = dot_product(l, ray_d);  /* s: projection of l onto d */
    __m256d ml2 = dot_product(l, l);     /* l2: squared distance to center */
    /* r2 = radius^2; radius is a scalar, broadcast it directly */
    __m256d sphrad = _mm256_set1_pd(sph->radius);
    __m256d mr2 = _mm256_mul_pd(sphrad, sphrad);
    __m256d mzero = _mm256_setzero_pd();
    __m256d if1 = _mm256_cmp_pd(ms, mzero, _CMP_LE_OS);
    __m256d if2 = _mm256_cmp_pd(ml2, mr2, _CMP_GT_OS);
    if1 = _mm256_and_pd(if1, if2); /* miss: s <= 0 && l2 > r2 (behind origin) */
    __m256d mm2 = _mm256_mul_pd(ms, ms);
    mm2 = _mm256_sub_pd(ml2, mm2); /* m2 = l2 - s*s: closest-approach dist^2 */
    if2 = _mm256_cmp_pd(mm2, mr2, _CMP_GT_OS); /* miss: m2 > r2 */
    __m256d mq = _mm256_sub_pd(mr2, mm2);
    mq = _mm256_sqrt_pd(mq);
    __m256d if3 = _mm256_cmp_pd(ml2, mr2, _CMP_GT_OS); /* origin outside sphere? */
    __m256d smiq = _mm256_sub_pd(ms, mq); /* near root s-q (origin outside) */
    __m256d sadq = _mm256_add_pd(ms, mq); /* far root s+q (origin inside) */
    __m256i iall1 = _mm256_set1_epi64x(-1); /* iall1 = 0xFF...F */
    __m256d dall1 = _mm256_castsi256_pd(iall1);
    if1 = _mm256_or_pd(if1, if2);               /* combined miss mask */
    __m256d notif1 = _mm256_xor_pd(if1, dall1); /* hit mask = ~miss */
    __m256d t1copy = _mm256_add_pd(*t1, mzero); /* snapshot of incoming *t1 */
    smiq = _mm256_and_pd(smiq, if3);
    __m256d notif3 = _mm256_xor_pd(if3, dall1);
    sadq = _mm256_and_pd(sadq, notif3);
    __m256d newt1 = _mm256_or_pd(smiq, sadq); /* per-lane root selection */
    t1copy = _mm256_and_pd(t1copy, if1);      /* keep old t on miss lanes */
    __m256d t1notre = _mm256_and_pd(newt1, notif1); /* new t on hit lanes */
    *t1 = _mm256_add_pd(t1copy, t1notre); /* blend (masks are disjoint) */
    multiply_vector(ray_d, *t1, ip->point);
    add_vector(ray_e, ip->point, ip->point);
    subtract_vector(ip->point, sphcen, ip->normal);
    normalize(ip->normal);
    /* flip the normal to face the incoming ray, branch-free.
     * NOTE(review): ip->normal is treated here as a single __m256d but
     * the scalar code treats intersection.normal as a point3 — this
     * half of the AVX port looks unfinished; confirm the intersection
     * struct layout before relying on the flipped normal. */
    __m256d dotres = dot_product(ip->normal, ray_d);
    __m256d ifdot = _mm256_cmp_pd(dotres, mzero, _CMP_GT_OS);
    __m256d notifdot = _mm256_xor_pd(ifdot, dall1);
    __m256d ipcopy = _mm256_add_pd(ip->normal, mzero);
    ipcopy = _mm256_and_pd(ipcopy, notifdot);
    __m256d minus1 = _mm256_set1_pd(-1);
    multiply_vector(ip->normal, minus1, ip->normal);
    ip->normal = _mm256_and_pd(ip->normal, ifdot);
    ip->normal = _mm256_or_pd(ip->normal, ipcopy);
    /* previously fell off the end of a __m256d-returning function */
    return notif1;
}
/* Ray / rectangle intersection: two Moller-Trumbore triangle tests
 * over the quad, split along its diagonal.
 * @return 1 on a hit (with *t1 set to the ray parameter and ip filled
 *         with the hit point and a normal facing the ray), else 0.
 */
static int rayRectangularIntersection(const point3 ray_e,
                                      const point3 ray_d,
                                      rectangular *rec,
                                      intersection *ip, double *t1)
{
    point3 edge01, edge03, pvec;
    subtract_vector(rec->vertices[1], rec->vertices[0], edge01);
    subtract_vector(rec->vertices[3], rec->vertices[0], edge03);
    cross_product(ray_d, edge03, pvec);
    double det = dot_product(edge01, pvec);
    /* Cull rays (nearly) parallel to the plane — their determinant
     * vanishes and 1/det below would blow up. */
    if (det < 1e-4)
        return 0;
    double inv_det = 1.0 / det;
    point3 svec;
    subtract_vector(ray_e, rec->vertices[0], svec);
    double alpha = inv_det * dot_product(svec, pvec);
    if (alpha < 0.0 || alpha > 1.0)
        return 0;
    point3 qvec;
    cross_product(svec, edge01, qvec);
    double beta = inv_det * dot_product(ray_d, qvec);
    if (beta < 0.0 || beta > 1.0)
        return 0;
    *t1 = inv_det * dot_product(edge03, qvec);
    if (alpha + beta > 1.0f) {
        /* hit lies past the diagonal: test the quad's second triangle */
        point3 edge23, edge21;
        subtract_vector(rec->vertices[3], rec->vertices[2], edge23);
        subtract_vector(rec->vertices[1], rec->vertices[2], edge21);
        cross_product(ray_d, edge21, pvec);
        det = dot_product(edge23, pvec);
        if (det < 1e-4)
            return 0;
        inv_det = 1.0 / det;
        subtract_vector(ray_e, rec->vertices[2], svec);
        alpha = inv_det * dot_product(svec, pvec);
        if (alpha < 0.0)
            return 0;
        cross_product(svec, edge23, qvec);
        beta = inv_det * dot_product(ray_d, qvec);
        if (beta < 0.0 || beta + alpha > 1.0)
            return 0;
        *t1 = inv_det * dot_product(edge21, qvec);
    }
    /* reject hits too close to the origin (self-intersection guard) */
    if (*t1 < 1e-4)
        return 0;
    /* flip the stored normal so it always faces the incoming ray */
    COPY_POINT3(ip->normal, rec->normal);
    if (dot_product(ip->normal, ray_d) > 0.0)
        multiply_vector(ip->normal, -1, ip->normal);
    multiply_vector(ray_d, *t1, ip->point);
    add_vector(ray_e, ip->point, ip->point);
    return 1;
}
/* Accumulate one light's Phong contribution into local_color:
 *   local += ambient * surface +
 *            light * (Kd * surface * diffuse + Ks * specular)
 */
static void localColor(color local_color,
                       const color light_color, double diffuse,
                       double specular, const object_fill *fill)
{
    color ambient = { 0.1, 0.1, 0.1 };
    color diffuse_term, specular_term, light_copy, surface;
    /* diffuse_term = light * Kd * surface * diffuse */
    COPY_COLOR(diffuse_term, fill->fill_color);
    multiply_vector(diffuse_term, fill->Kd, diffuse_term);
    multiply_vector(diffuse_term, diffuse, diffuse_term);
    COPY_COLOR(light_copy, light_color);
    multiply_vectors(diffuse_term, light_copy, diffuse_term);
    /* specular_term = light * Ks * specular */
    COPY_COLOR(specular_term, light_color);
    multiply_vector(specular_term, fill->Ks, specular_term);
    multiply_vector(specular_term, specular, specular_term);
    /* ambient = 0.1 * surface, element-wise */
    COPY_COLOR(surface, fill->fill_color);
    multiply_vectors(ambient, surface, ambient);
    /* sum the three terms into the running local color */
    add_vector(diffuse_term, ambient, diffuse_term);
    add_vector(diffuse_term, specular_term, diffuse_term);
    add_vector(local_color, diffuse_term, local_color);
}
/* Per-lane Phong diffuse/specular factors for 4 rays at once.
 * @param diffuse  out: max(0, n . l) per lane
 * @param specular out: max(0, r . d)^phong_pow per lane
 * @param d direction of the ray into intersection
 * @param l direction of intersection to light
 * @param n surface normal
 * @param phong_pow Phong exponent; this scene only ever uses 5 or 30
 *
 * Fixes over the previous revision: `powtar` was used but never
 * declared (the mask result was stored in `powtarget`), and the dot
 * products called a nonexistent `dotproduct` instead of the
 * `dot_product` helper every sibling AVX function uses; `*specular`
 * was also left uninitialized for any other exponent.
 */
static void compute_specular_diffuse(__m256d *diffuse,
                                     __m256d *specular,
                                     const point3v d, const point3v l,
                                     const point3v n, double phong_pow)
{
    point3v d_copy, l_copy, middle, r;
    __m256d minus1 = _mm256_set1_pd(-1);
    __m256d two = _mm256_set1_pd(2);
    /* d_copy = -d and l_copy = -l, both normalized */
    COPY_POINT3vv(d_copy, d);
    multiply_vector(d_copy, minus1, d_copy);
    normalize(d_copy);
    COPY_POINT3vv(l_copy, l);
    multiply_vector(l_copy, minus1, l_copy);
    normalize(l_copy);
    /* r = 2 (n . l) n - l : mirror image of the light direction */
    __m256d dot_nl = dot_product(n, l_copy);
    multiply_vector(n, dot_nl, middle);
    multiply_vector(middle, two, middle);
    subtract_vector(middle, l_copy, r);
    normalize(r);
    __m256d mzero = _mm256_setzero_pd();
    /* diffuse = max(0, n . l), via compare mask instead of a branch */
    __m256d pos_nl = _mm256_cmp_pd(dot_nl, mzero, _CMP_GT_OS);
    *diffuse = _mm256_and_pd(pos_nl, dot_nl);
    /* specular = max(0, r . d) ^ phong_pow */
    __m256d dot_rd = dot_product(r, d_copy);
    __m256d pos_rd = _mm256_cmp_pd(dot_rd, mzero, _CMP_GT_OS);
    __m256d powtarget = _mm256_and_pd(pos_rd, dot_rd);
    /* pow() has no AVX counterpart, so expand the two exponents the
     * scene uses into square-and-multiply chains. */
    if (5 == phong_pow) {
        __m256d powtar2 = _mm256_mul_pd(powtarget, powtarget);
        __m256d powtar4 = _mm256_mul_pd(powtar2, powtar2);
        *specular = _mm256_mul_pd(powtar4, powtarget);     /* x^5 */
    } else if (30 == phong_pow) {
        __m256d powtar2 = _mm256_mul_pd(powtarget, powtarget);
        __m256d powtar4 = _mm256_mul_pd(powtar2, powtar2);
        __m256d powtar8 = _mm256_mul_pd(powtar4, powtar4);
        __m256d powtar16 = _mm256_mul_pd(powtar8, powtar8);
        __m256d powtar24 = _mm256_mul_pd(powtar16, powtar8);
        __m256d powtar6 = _mm256_mul_pd(powtar4, powtar2);
        *specular = _mm256_mul_pd(powtar24, powtar6);      /* x^30 */
    } else {
        /* previously left uninitialized for any other exponent */
        *specular = mzero;
    }
}
/* Mirror-reflect d about the surface normal n: r = d - 2 (d . n) n.
 * @param r out: direction of the reflected ray
 * @param d direction of the primary ray into the intersection
 * @param n surface normal at the intersection
 */
static void reflection(point3v r, const point3v d, const point3v n)
{
    __m256d neg_two = _mm256_set1_pd(-2);
    __m256d scale = _mm256_mul_pd(neg_two, dot_product(d, n));
    multiply_vector(n, scale, r);
    add_vector(r, d, r);
}
/* Vectorized refraction direction, following the GLSL refract() spec:
 *   k = 1 - eta^2 * (1 - dot(N,I)^2),  eta = n1/n2
 *   t = eta*I - (eta*dot(N,I) + sqrt(k)) * N
 * Lanes with total internal reflection (k < 0) or an invalid medium
 * (n2 <= 0) get t = zero vector, matching the scalar version.
 * reference: https://www.opengl.org/sdk/docs/man/html/refract.xhtml
 *
 * Fixes over the previous revision: `_mm256_set_pd(1)` had the wrong
 * arity (set_pd takes 4 scalars; set1_pd broadcasts), and the invalid
 * mask OR-ed the raw values k and n2v instead of the comparison masks
 * if1/if2, which were computed but never used.
 */
static void refraction(point3v *t, const point3v *I, const point3v *N,
                       double n1, double n2)
{
    __m256d n2v = _mm256_set1_pd(n2);
    __m256d eta = _mm256_set1_pd(n1/n2);
    __m256d dot_NI = dot_product(N, I);
    __m256d k = _mm256_set1_pd(1);                  /* was _mm256_set_pd(1) */
    __m256d eta2 = _mm256_mul_pd(eta, eta);          /* eta^2 */
    __m256d dot_NI2 = _mm256_mul_pd(dot_NI, dot_NI); /* dot^2 */
    dot_NI2 = _mm256_sub_pd(k, dot_NI2);             /* 1 - dot^2 */
    eta2 = _mm256_mul_pd(eta2, dot_NI2);             /* eta^2 (1 - dot^2) */
    k = _mm256_sub_pd(k, eta2);                      /* k */
#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
    __m256d mzero = _mm256_setzero_pd();
    __m256i iall1 = _mm256_set1_epi64x(-1); /* iall1 = 0xFF...F */
    __m256d dall1 = _mm256_castsi256_pd(iall1);
    __m256d if1 = _mm256_cmp_pd(k, mzero, _CMP_LT_OS);   /* k < 0.0 (TIR) */
    __m256d if2 = _mm256_cmp_pd(n2v, mzero, _CMP_LE_OS); /* n2 <= 0.0 */
    __m256d ifa = _mm256_or_pd(if1, if2); /* was or-ing k/n2v values: fixed */
    __m256d notifa = _mm256_xor_pd(ifa, dall1);
    point3v tmp;
    multiply_vector(I, eta, t);                /* t = eta * I */
    __m256d midpar = _mm256_mul_pd(eta, dot_NI);
    k = _mm256_sqrt_pd(k); /* NaN where k < 0 — masked to zero below */
    midpar = _mm256_add_pd(midpar, k);
    multiply_vector(N, midpar, tmp);
    subtract_vector(t, tmp, t);
    /* zero out TIR / invalid-medium lanes */
    t->x = _mm256_and_pd(t->x, notifa);
    t->y = _mm256_and_pd(t->y, notifa);
    t->z = _mm256_and_pd(t->z, notifa);
}
/* Unpolarized Fresnel reflectance (average of the s- and p-polarized
 * terms) for a ray crossing from refraction index n1 into n2.
 * @param r incoming ray direction, unit vector
 * @param l refracted ray direction, unit vector (zeroed on TIR)
 * @param normal surface normal, unit vector
 * @param n1 refraction index of the incident medium
 * @param n2 refraction index of the transmitting medium
 *
 * reference: http://graphics.stanford.edu/courses/cs148-10-summer/docs/2006--degreve--reflection_refraction.pdf
 */
static double fresnel(const point3 r, const point3 l,
                      const point3 normal, double n1, double n2)
{
    /* refraction() zeroes l on total internal reflection, so a short
     * l means everything reflects */
    if (length(l) < 0.99)
        return 1.0;
    double cos_i = -dot_product(r, normal);
    double cos_t = -dot_product(l, normal);
    double rs = (n1 * cos_i - n2 * cos_t) /
                (n1 * cos_i + n2 * cos_t);   /* s-polarized amplitude */
    double rp = (n2 * cos_i - n1 * cos_t) /
                (n2 * cos_i + n1 * cos_t);   /* p-polarized amplitude */
    return (rs * rs + rp * rp) / 2.0;
}
/* Find the nearest object hit by the ray e + t*d within (t0, t1).
 * @param t0 bias distance: the search starts from e + t0*d to avoid
 *           self-intersection with the surface the ray leaves
 * @param t1 maximum distance to consider
 * @param hit_rectangular out: hit rectangle node, or NULL
 * @param hit_sphere out: hit sphere node, or NULL (exactly one of the
 *        two outputs is non-NULL on a hit; both NULL on a miss)
 * @return intersection data of the nearest hit (undefined on a miss)
 *
 * NOTE(review): raySphereIntersection above was rewritten to the AVX
 * signature (point3v args, __m256d return) while this caller still
 * passes scalar point3 values and tests the result as a boolean —
 * confirm which variant this file is meant to link against.
 */
static intersection ray_hit_object(const point3 e, const point3 d,
                                   double t0, double t1,
                                   const rectangular_node rectangulars,
                                   rectangular_node *hit_rectangular,
                                   const sphere_node spheres,
                                   sphere_node *hit_sphere)
{
    /* set these to not hit */
    *hit_rectangular = NULL;
    *hit_sphere = NULL;
    /* biased_e = e + t0 * d: nudged origin */
    point3 biased_e;
    multiply_vector(d, t0, biased_e);
    add_vector(biased_e, e, biased_e);
    double nearest = t1;
    intersection result, tmpresult;
    for (rectangular_node rec = rectangulars; rec; rec = rec->next) {
        if (rayRectangularIntersection(biased_e, d, &(rec->element),
                                       &tmpresult, &t1) && (t1 < nearest)) {
            /* hit is closest so far */
            *hit_rectangular = rec;
            nearest = t1;
            result = tmpresult;
        }
    }
    /* check the spheres */
    for (sphere_node sphere = spheres; sphere; sphere = sphere->next) {
        if (raySphereIntersection(biased_e, d, &(sphere->element),
                                  &tmpresult, &t1) && (t1 < nearest)) {
            *hit_sphere = sphere;
            /* a closer sphere supersedes any rectangle hit */
            *hit_rectangular = NULL;
            nearest = t1;
            result = tmpresult;
        }
    }
    return result;
}
/* Build 4 primary-ray directions at once for pixel columns i[0..3] and
 * rows j[0..3] on the virtual image plane.
 * @param d out: normalized ray directions, one per SIMD lane
 * @param u,v,w camera basis vectors
 * @param i,j arrays of 4 pixel coordinates (supersampled grid)
 * @param width,height supersampled image dimensions
 */
static void rayConstruction(point3v *d, const point3v *u, const point3v *v,
                            const point3v *w, unsigned int *i, unsigned int *j,
                            const viewpoint *view, unsigned int width,
                            unsigned int height)
{
    /* fixed film geometry: 35mm-square-ish plane at focal length 0.05 */
    double xmin = -0.0175;
    double ymin = -0.0175;
    double xmax = 0.0175;
    double ymax = 0.0175;
    double focal = 0.05;
    point3v u_tmp, v_tmp, w_tmp, s;
    __m256d w_s = _mm256_set1_pd(focal);
    /* per-lane image-plane coordinates for the 4 pixels */
    double us[4], vs[4];
    for (int k = 0; k < 4; k++) {
        us[k] = xmin + ((xmax - xmin) * (float) i[k] / (width - 1));
        vs[k] = ymax + ((ymin - ymax) * (float) j[k] / (height - 1));
    }
    __m256d u_s = _mm256_loadu_pd(us);
    __m256d v_s = _mm256_loadu_pd(vs);
    /* s = e + u_s * u + v_s * v + w_s * w */
    m_multiply_vector(u, u_s, &u_tmp);
    m_multiply_vector(v, v_s, &v_tmp);
    m_multiply_vector(w, w_s, &w_tmp);
    point3v vrp;
    COPY_POINT3v(&vrp, view->vrp);
    madd_vector(&vrp, &u_tmp, &s);
    madd_vector(&s, &v_tmp, &s);
    madd_vector(&s, &w_tmp, &s);
    /* p(t) = e + t d with d = (s - e), normalized */
    msubtract_vector(&s, &vrp, d);
    mnormalize(d);
}
/* Derive the camera basis (u, v, w) from the viewpoint:
 * w along the view-plane normal, u from w x vup, v = u x w. */
static void calculateBasisVectors(point3v *u, point3v *v, point3v *w,
                                  const viewpoint *view)
{
    point3v up;
    /* w: normalized view-plane normal */
    COPY_POINT3v(w, view->vpn);
    mnormalize(w);
    /* u = w x vup, normalized */
    COPY_POINT3v(&up, view->vup);
    mcross_product(w, &up, u);
    mnormalize(u);
    /* v = u x w, normalized */
    mcross_product(u, w, v);
    mnormalize(v);
}
/* @brief clamp each RGB channel to at most 1.0 before quantization */
static void protect_color_overflow(color c)
{
    for (int ch = 0; ch < 3; ch++)
        c[ch] = (c[ch] > 1.0) ? 1.0 : c[ch];
}
/* Recursively trace one ray and compute the color it contributes.
 * @param e ray origin
 * @param t minimum ray parameter (bias against self-intersection)
 * @param d ray direction
 * @param stk stack of refraction indices of the media the ray is
 *        currently inside (handles nested transparent objects)
 * @param object_color out: accumulated color for this ray
 * @param bounces_left remaining recursion budget for secondary rays
 * @return 1 if the ray hit an object, 0 otherwise
 *
 * NOTE(review): compute_specular_diffuse/reflection/refraction above
 * were rewritten with AVX point3v signatures while this caller still
 * passes scalar point3 values — confirm which variants this file is
 * meant to build with.
 */
static unsigned int ray_color(const point3 e, double t,
                              const point3 d,
                              idx_stack *stk,
                              const rectangular_node rectangulars,
                              const sphere_node spheres,
                              const light_node lights,
                              color object_color, int bounces_left)
{
    rectangular_node hit_rec = NULL, light_hit_rec = NULL;
    sphere_node hit_sphere = NULL, light_hit_sphere = NULL;
    double diffuse, specular;
    point3 l, _l, r, rr;
    object_fill fill;
    color reflection_part;
    color refraction_part;
    /* might be a reflection ray, so check how many times we've bounced */
    if (bounces_left == 0) {
        SET_COLOR(object_color, 0.0, 0.0, 0.0);
        return 0;
    }
    /* check for intersection with a sphere or a rectangular */
    intersection ip= ray_hit_object(e, d, t, MAX_DISTANCE, rectangulars,
                                    &hit_rec, spheres, &hit_sphere);
    if (!hit_rec && !hit_sphere)
        return 0;
    /* pick the fill of the object that was hit */
    fill = hit_rec ?
           hit_rec->element.rectangular_fill :
           hit_sphere->element.sphere_fill;
    /* identity of the hit object, used to match the idx stack below */
    void *hit_obj = hit_rec ? (void *) hit_rec : (void *) hit_sphere;
    /* assume it is a shadow */
    SET_COLOR(object_color, 0.0, 0.0, 0.0);
    for (light_node light = lights; light; light = light->next) {
        /* calculate the intersection vector pointing at the light */
        subtract_vector(ip.point, light->element.position, l);
        multiply_vector(l, -1, _l);
        normalize(_l);
        /* check for intersection with an object. use ignore_me
         * because we don't care about this normal
         */
        ray_hit_object(ip.point, _l, MIN_DISTANCE, length(l),
                       rectangulars, &light_hit_rec,
                       spheres, &light_hit_sphere);
        /* the light was not block by itself(lit object) */
        if (light_hit_rec || light_hit_sphere)
            continue;
        compute_specular_diffuse(&diffuse, &specular, d, l,
                                 ip.normal, fill.phong_power);
        localColor(object_color, light->element.light_color,
                   diffuse, specular, &fill);
    }
    reflection(r, d, ip.normal);
    double idx = idx_stack_top(stk).idx, idx_pass = fill.index_of_refraction;
    /* leaving the medium we are inside (pop it) or entering the hit
     * object's medium (push it) */
    if (idx_stack_top(stk).obj == hit_obj) {
        idx_stack_pop(stk);
        idx_pass = idx_stack_top(stk).idx;
    } else {
        idx_stack_element e = { .obj = hit_obj,
                                .idx = fill.index_of_refraction
                              };
        idx_stack_push(stk, e);
    }
    refraction(rr, d, ip.normal, idx, idx_pass);
    /* Fresnel split between reflected and refracted energy; nearly
     * opaque surfaces (T <= 0.1) reflect everything */
    double R = (fill.T > 0.1) ?
               fresnel(d, rr, ip.normal, idx, idx_pass) :
               1.0;
    /* totalColor = localColor +
                    mix((1-fill.Kd) * fill.R * reflection, T * refraction, R)
     */
    if (fill.R > 0) {
        /* if we hit something, add the color */
        int old_top = stk->top;
        if (ray_color(ip.point, MIN_DISTANCE, r, stk, rectangulars, spheres,
                      lights, reflection_part,
                      bounces_left - 1)) {
            multiply_vector(reflection_part, R * (1.0 - fill.Kd) * fill.R,
                            reflection_part);
            add_vector(object_color, reflection_part,
                       object_color);
        }
        /* the reflection recursion may have pushed/popped media —
         * restore the stack for the refraction ray below */
        stk->top = old_top;
    }
    /* calculate refraction ray */
    if ((length(rr) > 0.0) && (fill.T > 0.0) &&
        (fill.index_of_refraction > 0.0)) {
        normalize(rr);
        if (ray_color(ip.point, MIN_DISTANCE, rr, stk,rectangulars, spheres,
                      lights, refraction_part,
                      bounces_left - 1)) {
            multiply_vector(refraction_part, (1 - R) * fill.T,
                            refraction_part);
            add_vector(object_color, refraction_part,
                       object_color);
        }
    }
    protect_color_overflow(object_color);
    return 1;
}
/* Render the scene into an RGB byte buffer, tracing 4 image rows at a
 * time through the AVX ray-construction path.
 * @param pixels out: width*height*3 bytes, RGB order
 * @param background_color color for rays that hit nothing (this is
 *        not ambient light)
 *
 * Fixes over the previous revision: a leftover debug printf for one
 * pixel was removed, and the pixel stores were hoisted out of the
 * sample loop (they were rewritten once per MSAA sample with partial
 * sums; only the last write was meaningful).
 * NOTE(review): the row loop steps by 4, so height is assumed to be a
 * multiple of 4 — confirm with callers.
 */
void raytracing(uint8_t *pixels, color background_color,
                rectangular_node rectangulars, sphere_node spheres,
                light_node lights, const viewpoint *view,
                int width, int height)
{
    point3v u, v, w, d;
    color object_color[4];
    for (int i = 0; i < 4; i++) {
        object_color[i][0] = 0.0;
        object_color[i][1] = 0.0;
        object_color[i][2] = 0.0;
    }
    /* calculate the camera basis u, v, w */
    calculateBasisVectors(&u, &v, &w, view);
    idx_stack stk[4];
    unsigned int i4[4], j4[4];
    int factor = sqrt(SAMPLES); /* MSAA: factor x factor subpixel grid */
    for (int j = 0; j < height; j += 4) {
        for (int i = 0; i < width; i++) {
            double r[4], g[4], b[4];
            for (int ii = 0; ii < 4; ii++) {
                r[ii] = 0.0;
                g[ii] = 0.0;
                b[ii] = 0.0;
            }
            /* MSAA: accumulate SAMPLES subpixel rays per pixel, for
             * rows j..j+3 simultaneously */
            for (int s = 0; s < SAMPLES; s++) {
                for (int k = 0; k < 4; k++) {
                    idx_stack_init(&stk[k]);
                    i4[k] = i * factor + s / factor;
                    j4[k] = (j + k) * factor + s % factor;
                }
                rayConstruction(&d, &u, &v, &w, i4, j4, view,
                                width * factor, height * factor);
                /* unpack the 4 SIMD ray directions into scalar points
                 * for the scalar ray_color() path */
                point3 dp[4];
                double x[4], y[4], z[4];
                _mm256_storeu_pd(x, d.x);
                _mm256_storeu_pd(y, d.y);
                _mm256_storeu_pd(z, d.z);
                for (int ii = 0; ii < 4; ii++) {
                    dp[ii][0] = x[ii];
                    dp[ii][1] = y[ii];
                    dp[ii][2] = z[ii];
                }
                for (int k = 0; k < 4; k++) {
                    if (ray_color(view->vrp, 0.0, dp[k], &(stk[k]),
                                  rectangulars, spheres, lights,
                                  object_color[k],
                                  MAX_REFLECTION_BOUNCES)) {
                        r[k] += object_color[k][0];
                        g[k] += object_color[k][1];
                        b[k] += object_color[k][2];
                    } else {
                        r[k] += background_color[0];
                        g[k] += background_color[1];
                        b[k] += background_color[2];
                    }
                }
            }
            /* store each of the 4 rows once, after all samples are in */
            for (int k = 0; k < 4; k++) {
                pixels[((i + ((j + k) * width)) * 3) + 0] = r[k] * 255 / SAMPLES;
                pixels[((i + ((j + k) * width)) * 3) + 1] = g[k] * 255 / SAMPLES;
                pixels[((i + ((j + k) * width)) * 3) + 2] = b[k] * 255 / SAMPLES;
            }
        }
    }
}
|
program_evaluator.h | // Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
//
// The ProgramEvaluator runs the cost functions contained in each residual block
// and stores the result into a jacobian. The particular type of jacobian is
// abstracted out using two template parameters:
//
// - An "EvaluatePreparer" that is responsible for creating the array with
// pointers to the jacobian blocks where the cost function evaluates to.
// - A "JacobianWriter" that is responsible for storing the resulting
// jacobian blocks in the passed sparse matrix.
//
// This abstraction affords an efficient evaluator implementation while still
// supporting writing to multiple sparse matrix formats. For example, when the
// ProgramEvaluator is parameterized for writing to block sparse matrices, the
// residual jacobians are written directly into their final position in the
// block sparse matrix by the user's CostFunction; there is no copying.
//
// The evaluation is threaded with OpenMP.
//
// The EvaluatePreparer and JacobianWriter interfaces are as follows:
//
// class EvaluatePreparer {
// // Prepare the jacobians array for use as the destination of a call to
// // a cost function's evaluate method.
// void Prepare(const ResidualBlock* residual_block,
// int residual_block_index,
// SparseMatrix* jacobian,
// double** jacobians);
// }
//
// class JacobianWriter {
// // Create a jacobian that this writer can write. Same as
// // Evaluator::CreateJacobian.
// SparseMatrix* CreateJacobian() const;
//
// // Create num_threads evaluate preparers. Caller owns result which must
// // be freed with delete[]. Resulting preparers are valid while *this is.
// EvaluatePreparer* CreateEvaluatePreparers(int num_threads);
//
// // Write the block jacobians from a residual block evaluation to the
// // larger sparse jacobian.
// void Write(int residual_id,
// int residual_offset,
// double** jacobians,
// SparseMatrix* jacobian);
// }
//
// Note: The ProgramEvaluator is not thread safe, since internally it maintains
// some per-thread scratch space.
#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif
#include <map>
#include <string>
#include <vector>
#include "ceres/execution_summary.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/small_blas.h"
namespace ceres {
namespace internal {
// Default JacobianFinalizer: a no-op, for jacobian types that need no
// post-processing after evaluation.
struct NullJacobianFinalizer {
  void operator()(SparseMatrix* jacobian, int num_parameters) {}
};
// Evaluates the program's residual blocks (in parallel with OpenMP when
// available) and scatters the results into cost, residuals, gradient and
// jacobian through the EvaluatePreparer / JacobianWriter policy types
// described in the file header.
template<typename EvaluatePreparer,
         typename JacobianWriter,
         typename JacobianFinalizer = NullJacobianFinalizer>
class ProgramEvaluator : public Evaluator {
 public:
  ProgramEvaluator(const Evaluator::Options &options, Program* program)
      : options_(options),
        program_(program),
        jacobian_writer_(options, program),
        evaluate_preparers_(
            jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
#ifndef CERES_USE_OPENMP
    CHECK_EQ(1, options_.num_threads)
        << "OpenMP support is not compiled into this binary; "
        << "only options.num_threads=1 is supported.";
#endif
    BuildResidualLayout(*program, &residual_layout_);
    evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
                                                   options.num_threads));
  }

  // Implementation of Evaluator interface.
  SparseMatrix* CreateJacobian() const {
    return jacobian_writer_.CreateJacobian();
  }

  // Evaluate cost and, when the corresponding pointer is non-NULL,
  // residuals, gradient and jacobian at 'state'. Returns false if any
  // residual block fails to evaluate.
  bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
                const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
    ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
    ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL
                                         ? "Evaluator::Residual"
                                         : "Evaluator::Jacobian",
                                         &execution_summary_);
    // The parameters are stateful, so set the state before evaluating.
    if (!program_->StateVectorToParameterBlocks(state)) {
      return false;
    }
    if (residuals != NULL) {
      VectorRef(residuals, program_->NumResiduals()).setZero();
    }
    if (jacobian != NULL) {
      jacobian->SetZero();
    }
    // Each thread gets it's own cost and evaluate scratch space.
    for (int i = 0; i < options_.num_threads; ++i) {
      evaluate_scratch_[i].cost = 0.0;
      if (gradient != NULL) {
        VectorRef(evaluate_scratch_[i].gradient.get(),
                  program_->NumEffectiveParameters()).setZero();
      }
    }
    // This bool is used to disable the loop if an error is encountered
    // without breaking out of it. The remaining loop iterations are still run,
    // but with an empty body, and so will finish quickly.
    bool abort = false;
    int num_residual_blocks = program_->NumResidualBlocks();
#pragma omp parallel for num_threads(options_.num_threads)
    for (int i = 0; i < num_residual_blocks; ++i) {
// Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
      if (abort) {
        continue;
      }
#ifdef CERES_USE_OPENMP
      int thread_id = omp_get_thread_num();
#else
      int thread_id = 0;
#endif
      EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
      EvaluateScratch* scratch = &evaluate_scratch_[thread_id];
      // Prepare block residuals if requested.
      const ResidualBlock* residual_block = program_->residual_blocks()[i];
      double* block_residuals = NULL;
      if (residuals != NULL) {
        block_residuals = residuals + residual_layout_[i];
      } else if (gradient != NULL) {
        // The gradient needs the residuals even when the caller did not
        // ask for them; use per-thread scratch storage.
        block_residuals = scratch->residual_block_residuals.get();
      }
      // Prepare block jacobians if requested.
      double** block_jacobians = NULL;
      if (jacobian != NULL || gradient != NULL) {
        preparer->Prepare(residual_block,
                          i,
                          jacobian,
                          scratch->jacobian_block_ptrs.get());
        block_jacobians = scratch->jacobian_block_ptrs.get();
      }
      // Evaluate the cost, residuals, and jacobians.
      double block_cost;
      if (!residual_block->Evaluate(
            evaluate_options.apply_loss_function,
            &block_cost,
            block_residuals,
            block_jacobians,
            scratch->residual_block_evaluate_scratch.get())) {
        abort = true;
// This ensures that the OpenMP threads have a consistent view of 'abort'. Do
// the flush inside the failure case so that there is usually only one
// synchronization point per loop iteration instead of two.
#pragma omp flush(abort)
        continue;
      }
      scratch->cost += block_cost;
      // Store the jacobians, if they were requested.
      if (jacobian != NULL) {
        jacobian_writer_.Write(i,
                               residual_layout_[i],
                               block_jacobians,
                               jacobian);
      }
      // Compute and store the gradient, if it was requested.
      if (gradient != NULL) {
        int num_residuals = residual_block->NumResiduals();
        int num_parameter_blocks = residual_block->NumParameterBlocks();
        for (int j = 0; j < num_parameter_blocks; ++j) {
          const ParameterBlock* parameter_block =
              residual_block->parameter_blocks()[j];
          if (parameter_block->IsConstant()) {
            continue;
          }
          // Accumulate J_j^T * r into this thread's gradient scratch.
          MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
              block_jacobians[j],
              num_residuals,
              parameter_block->LocalSize(),
              block_residuals,
              scratch->gradient.get() + parameter_block->delta_offset());
        }
      }
    }
    if (!abort) {
      const int num_parameters = program_->NumEffectiveParameters();
      // Sum the cost and gradient (if requested) from each thread.
      (*cost) = 0.0;
      if (gradient != NULL) {
        VectorRef(gradient, num_parameters).setZero();
      }
      for (int i = 0; i < options_.num_threads; ++i) {
        (*cost) += evaluate_scratch_[i].cost;
        if (gradient != NULL) {
          VectorRef(gradient, num_parameters) +=
              VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
        }
      }
      // Finalize the Jacobian if it is available.
      // `num_parameters` is passed to the finalizer so that additional
      // storage can be reserved for additional diagonal elements if
      // necessary.
      if (jacobian != NULL) {
        JacobianFinalizer f;
        f(jacobian, num_parameters);
      }
    }
    return !abort;
  }

  // Forward the plus operation (state + delta in the local
  // parameterization) to the underlying program.
  bool Plus(const double* state,
            const double* delta,
            double* state_plus_delta) const {
    return program_->Plus(state, delta, state_plus_delta);
  }

  int NumParameters() const {
    return program_->NumParameters();
  }
  int NumEffectiveParameters() const {
    return program_->NumEffectiveParameters();
  }

  int NumResiduals() const {
    return program_->NumResiduals();
  }

  virtual map<string, int> CallStatistics() const {
    return execution_summary_.calls();
  }

  virtual map<string, double> TimeStatistics() const {
    return execution_summary_.times();
  }

 private:
  // Per-thread scratch space needed to evaluate and store each residual block.
  struct EvaluateScratch {
    void Init(int max_parameters_per_residual_block,
              int max_scratch_doubles_needed_for_evaluate,
              int max_residuals_per_residual_block,
              int num_parameters) {
      residual_block_evaluate_scratch.reset(
          new double[max_scratch_doubles_needed_for_evaluate]);
      gradient.reset(new double[num_parameters]);
      VectorRef(gradient.get(), num_parameters).setZero();
      residual_block_residuals.reset(
          new double[max_residuals_per_residual_block]);
      jacobian_block_ptrs.reset(
          new double*[max_parameters_per_residual_block]);
    }

    double cost;
    scoped_array<double> residual_block_evaluate_scratch;
    // The gradient in the local parameterization.
    scoped_array<double> gradient;
    // Enough space to store the residual for the largest residual block.
    scoped_array<double> residual_block_residuals;
    scoped_array<double*> jacobian_block_ptrs;
  };

  // Precompute, per residual block, the start offset of its residuals
  // within the flat residual vector.
  static void BuildResidualLayout(const Program& program,
                                  vector<int>* residual_layout) {
    const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
    residual_layout->resize(program.NumResidualBlocks());
    int residual_pos = 0;
    for (int i = 0; i < residual_blocks.size(); ++i) {
      const int num_residuals = residual_blocks[i]->NumResiduals();
      (*residual_layout)[i] = residual_pos;
      residual_pos += num_residuals;
    }
  }

  // Create scratch space for each thread evaluating the program.
  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
                                                 int num_threads) {
    int max_parameters_per_residual_block =
        program.MaxParametersPerResidualBlock();
    int max_scratch_doubles_needed_for_evaluate =
        program.MaxScratchDoublesNeededForEvaluate();
    int max_residuals_per_residual_block =
        program.MaxResidualsPerResidualBlock();
    int num_parameters = program.NumEffectiveParameters();

    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
    for (int i = 0; i < num_threads; i++) {
      evaluate_scratch[i].Init(max_parameters_per_residual_block,
                               max_scratch_doubles_needed_for_evaluate,
                               max_residuals_per_residual_block,
                               num_parameters);
    }
    return evaluate_scratch;
  }

  Evaluator::Options options_;
  Program* program_;
  JacobianWriter jacobian_writer_;
  scoped_array<EvaluatePreparer> evaluate_preparers_;
  scoped_array<EvaluateScratch> evaluate_scratch_;
  vector<int> residual_layout_;
  ::ceres::internal::ExecutionSummary execution_summary_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
|
office_fmt_plug.c | /* Office 2007 cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com> */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_office;
#elif FMT_REGISTERS_H
john_register_one(&fmt_office);
#else
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "aes.h"
#include "sha.h"
#include "sha2.h"
#include "johnswap.h"
#include "office_common.h"
#include "simd-intrinsics.h"
#include "memdbg.h"
//#undef SIMD_COEF_32
//#undef SIMD_COEF_64
#define FORMAT_LABEL "Office"
#define FORMAT_NAME "2007/2010/2013"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME " / SHA512 " SHA512_ALGORITHM_NAME " AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 4
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define GETPOS_512W(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i*8)&(0xffffffff-7))*SIMD_COEF_64 + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#define GETOUTPOS_512W(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i*8)&(0xffffffff-7))*SIMD_COEF_64 + (unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64*8 )
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS_1(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#define GETPOS_512(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#define GETOUTPOS_512(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64*8 )
#else
#define GETPOS_1(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#define GETPOS_512(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + ((i)&7) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#define GETOUTPOS_512(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + ((i)&7) + (unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64*8 )
#endif
#define SHA1_LOOP_CNT (SIMD_COEF_32*SIMD_PARA_SHA1)
#define SHA512_LOOP_CNT (SIMD_COEF_64 * SIMD_PARA_SHA512)
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32 * SIMD_PARA_SHA1 * SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32 * SIMD_PARA_SHA1 * SIMD_PARA_SHA512)
#else
#define SHA1_LOOP_CNT 1
#define SHA512_LOOP_CNT 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/*
 * Self-test vectors: "$office$*version*..." ciphertext blobs paired with
 * their known plaintext passwords, covering 2007/2010/2013 documents and
 * the maximum (125-byte) password length.
 */
static struct fmt_tests office_tests[] = {
	{"$office$*2007*20*128*16*8b2c9e8c878844fc842012273be4bea8*aa862168b80d8c45c852696a8bb499eb*a413507fabe2d87606595f987f679ff4b5b4c2cd", "Password"},
	/* 2007-Default_myhovercraftisfullofeels_.docx */
	{"$office$*2007*20*128*16*91f095a1fd02595359fe3938fa9236fd*e22668eb1347957987175079e980990f*659f50b9062d36999bf3d0911068c93268ae1d86", "myhovercraftisfullofeels"},
	/* 2007-Default_myhovercraftisfullofeels_.dotx */
	{"$office$*2007*20*128*16*56ea65016fbb4eac14a6770b2dbe7e99*8cf82ce1b62f01fd3b2c7666a2313302*21443fe938177e648c482da72212a8848c2e9c80", "myhovercraftisfullofeels"},
	/* 2007-Default_myhovercraftisfullofeels_.xlsb */
	{"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*3a040a9cef3d3675009b22f99718e39c*48053b27e95fa53b3597d48ca4ad41eec382e0c8", "myhovercraftisfullofeels"},
	/* 2007-Default_myhovercraftisfullofeels_.xlsm */
	{"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*92bb2ef34ca662ca8a26c8e2105b05c0*0261ba08cd36a324aa1a70b3908a24e7b5a89dd6", "myhovercraftisfullofeels"},
	/* 2007-Default_myhovercraftisfullofeels_.xlsx */
	{"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*46bef371486919d4bffe7280110f913d*b51af42e6696baa097a7109cebc3d0ff7cc8b1d8", "myhovercraftisfullofeels"},
	/* 2007-Default_myhovercraftisfullofeels_.xltx */
	{"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*1addb6823689aca9ce400be8f9e55fc9*e06bf10aaf3a4049ffa49dd91cf9e7bbf88a1b3b", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.docx */
	{"$office$*2010*100000*128*16*213aefcafd9f9188e78c1936cbb05a44*d5fc7691292ab6daf7903b9a8f8c8441*46bfac7fb87cd43bd0ab54ebc21c120df5fab7e6f11375e79ee044e663641d5e", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.dotx */
	{"$office$*2010*100000*128*16*0907ec6ecf82ede273b7ee87e44f4ce5*d156501661638cfa3abdb7fdae05555e*4e4b64e12b23f44d9a8e2e00196e582b2da70e5e1ab4784384ad631000a5097a", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.xlsb */
	{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*00780eeb9605c7e27227c5619e91dc21*90aaf0ea5ccc508e699de7d62c310f94b6798ae77632be0fc1a0dc71600dac38", "myhovercraftisfullofeels"},
	/* 2010-Default_myhovercraftisfullofeels_.xlsx */
	{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*ef51883a775075f30d2207e87987e6a3*a867f87ea955d15d8cb08dc8980c04bf564f8af060ab61bf7fa3543853e0d11a", "myhovercraftisfullofeels"},
	/* 2013-openwall.pptx */
	{"$office$*2013*100000*256*16*9b12805dd6d56f46d07315153f3ecb9c*c5a4a167b51faa6629f6a4caf0b4baa8*87397e0659b2a6fff90291f8e6d6d0018b750b792fefed77001edbafba7769cd", "openwall"},
	/* 365-2013-openwall.docx */
	{"$office$*2013*100000*256*16*774a174239a7495a59cac39a122d991c*b2f9197840f9e5d013f95a3797708e83*ecfc6d24808691aac0daeaeba72aba314d72c6bbd12f7ff0ea1a33770187caef", "openwall"},
	/* 365-2013-password.docx */
	{"$office$*2013*100000*256*16*d4fc9302eedabf9872b24ca700a5258b*7c9554d582520747ec3e872f109a7026*1af5b5024f00e35eaf5fd8148b410b57e7451a32898acaf14275a8c119c3a4fd", "password"},
	/* 365-2013-password.xlsx */
	{"$office$*2013*100000*256*16*59b49c64c0d29de733f0025837327d50*70acc7946646ea300fc13cfe3bd751e2*627c8bdb7d9846228aaea81eeed434d022bb93bb5f4da146cb3ad9d847de9ec9", "password"},
	/* 365-2013-strict-password.docx */
	{"$office$*2013*100000*256*16*f1c23049d85876e6b20e95ab86a477f1*13303dbd27a38ea86ef11f1b2bc56225*9a69596de0655a6c6a5b2dc4b24d6e713e307fb70af2d6b67b566173e89f941d", "password"},
	/* Max password length data, 125 bytes. Made with pass_gen.pl */
	{"$office$*2007*20*128*16*7268323350556e527671367031526263*54344b786a6967615052493837496735*96c9d7cc44e81971aadfe81cce88cb8b00000000", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
	{"$office$*2010*100000*128*16*42624931633777446c67354e34686e64*73592fdc2ecb12cd8dcb3ca2cec852bd*82f7315701818a7150ed7a7977717d0b56dcd1bc27e40a23dee6287a6ed55f9b", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
	{"$office$*2013*100000*256*16*36537a3373756b587632386d77665362*c5958bd6177be548ce33d99f8e4fd7a7*43baa9dfab09a7e54b9d719dbe5187f1f7b55d7b761361fe1f60c85b044aa125", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
	{NULL}
};
/* Salt currently under attack; selected by set_salt(). */
static ms_office_custom_salt *cur_salt;

/* Office 2007 always uses a fixed 50000-round SHA-1 key stretch. */
#define MS_OFFICE_2007_ITERATIONS	50000

#if defined (_OPENMP)
static int omp_t = 1;
#endif
/* Password encoded in UCS-2 */
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];
/* UCS-2 password length, in octets */
static int *saved_len;
/* Per-candidate 16-byte comparable value (2007 format only; filled by
   PasswordVerifier() and checked via the generic binary_hash()/cmp_*). */
static uint32_t (*crypt_key)[4];
/* Per-candidate pass/fail flags (2010/2013 formats). */
static int *cracked;

/* Office 2010/2013 */
/* Fixed 8-byte block keys appended to the stretched hash to derive the two
   agile-encryption verifier hashes. */
static const unsigned char encryptedVerifierHashInputBlockKey[] = { 0xfe, 0xa7, 0xd2, 0x76, 0x3b, 0x4b, 0x9e, 0x79 };
static const unsigned char encryptedVerifierHashValueBlockKey[] = { 0xd7, 0xaa, 0x0f, 0x6d, 0x30, 0x61, 0x34, 0x4e };
/*
 * Derive the AES key from the final stretched hash, step 4a in 2.3.4.7 of
 * MS_OFFCRYPT version 1.0: XOR the 20-byte hash into a 64-byte 0x36 pad
 * and SHA-1 the result into X1, which is returned.  The 0x5C (X2) variant
 * is only needed when the encryption key is longer than the hash; that
 * path is unimplemented and aborts loudly if ever reached.
 */
static unsigned char *DeriveKey(unsigned char *hashValue, unsigned char *X1)
{
	unsigned char pad[64];
	int i;
	SHA_CTX ctx;

	// This is step 4a in 2.3.4.7 of MS_OFFCRYPT version 1.0
	// and is required even though the notes say it should be
	// used only when the encryption algorithm key > hash length.
	memset(pad, 0x36, sizeof(pad));
	for (i = 0; i < 20; i++)
		pad[i] ^= hashValue[i];

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, pad, sizeof(pad));
	SHA1_Final(X1, &ctx);

	if (cur_salt->verifierHashSize > cur_salt->keySize/8)
		return X1;

	/* TODO: finish up this function (the 0x5C / X2 branch) */
	//for (i = 0; i < 64; i++)
	//	derivedKey[i] = (i < 30 ? 0x5C ^ hashValue[i] : 0x5C);

	fprintf(stderr, "\n\n*** ERROR: DeriveKey() entered Limbo.\n");
	fprintf(stderr, "Please report to john-dev mailing list.\n");
	error();

	return NULL;
}
#ifdef SIMD_COEF_32
/*
 * SIMD version: derive the Office 2007 AES key for SHA1_LOOP_CNT candidates
 * starting at 'idx'.  Construction: H(0) = SHA1(salt . password);
 * H(n) = SHA1(LE32(n-1) . H(n-1)) for 50000 rounds;
 * Hfinal = SHA1(H(n) . LE32(0)); final[i] = DeriveKey(Hfinal).
 * The 50000 middle rounds run in the interleaved SIMD buffer layout.
 */
static void GeneratePasswordHashUsingSHA1(int idx, unsigned char final[SHA1_LOOP_CNT][20])
{
	unsigned char hashBuf[20];
	/* H(0) = H(salt, password)
	 * hashBuf = SHA1Hash(salt, password);
	 * create input buffer for SHA1 from salt and unicode version of password */
	unsigned char X1[20];
	SHA_CTX ctx;
	unsigned char _IBuf[64*SHA1_LOOP_CNT+MEM_ALIGN_CACHE], *keys;
	uint32_t *keys32;
	unsigned i, j;

	// Cache-aligned interleaved SIMD input buffer: one 64-byte SHA-1 block per lane
	keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
	keys32 = (uint32_t*)keys;
	memset(keys, 0, 64*SHA1_LOOP_CNT);

	for (i = 0; i < SHA1_LOOP_CNT; ++i) {
		// H(0) computed with the scalar SHA-1 (variable-length input)
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
		SHA1_Update(&ctx, saved_key[idx+i], saved_len[idx+i]);
		SHA1_Final(hashBuf, &ctx);
		/* Generate each hash in turn
		 * H(n) = H(i, H(n-1))
		 * hashBuf = SHA1Hash(i, hashBuf); */
		// Create a byte array of the integer and put at the front of the input buffer
		// 1.3.6 says that little-endian byte ordering is expected
		for (j = 4; j < 24; ++j)
			keys[GETPOS_1(j, i)] = hashBuf[j-4];
		keys[GETPOS_1(j, i)] = 0x80;	// SHA-1 padding bit (j == 24 here)
		// 24 bytes of crypt data (192 bits).
		keys[GETPOS_1(63, i)] = 192;	// message bit-length in the final block byte
	}
	// we do 1 less than actual number of iterations here.
	for (i = 0; i < MS_OFFICE_2007_ITERATIONS-1; i++) {
		for (j = 0; j < SHA1_LOOP_CNT; ++j) {
			// little-endian round counter in the first 2 bytes (max 50000 fits in 16 bits)
			keys[GETPOS_1(0, j)] = i&0xff;
			keys[GETPOS_1(1, j)] = i>>8;
		}
		// Here we output to 4 bytes past start of input buffer.
		SIMDSHA1body(keys, &keys32[SIMD_COEF_32], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
	}
	// last iteration is output to start of input buffer, then 32 bit 0 appended.
	// but this still ends up being 24 bytes of crypt data.
	for (j = 0; j < SHA1_LOOP_CNT; ++j) {
		keys[GETPOS_1(0, j)] = i&0xff;
		keys[GETPOS_1(1, j)] = i>>8;
	}
	SIMDSHA1body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
	// Finally, append "block" (0) to H(n)
	// hashBuf = SHA1Hash(hashBuf, 0);
	for (i = 0; i < SHA1_LOOP_CNT; ++i) {
		keys[GETPOS_1(20,i)] = 0;
		keys[GETPOS_1(21,i)] = 0;
		keys[GETPOS_1(22,i)] = 0;
		keys[GETPOS_1(23,i)] = 0;
	}
	SIMDSHA1body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
	// Now convert back into a 'flat' value, which is a flat array.
	for (i = 0; i < SHA1_LOOP_CNT; ++i)
		memcpy(final[i], DeriveKey(&keys[20*i], X1), cur_salt->keySize/8);
}
#else
// for non MMX, SHA1_LOOP_CNT is 1
/*
 * Scalar version: derive the Office 2007 AES key for the single candidate
 * at 'idx'.  Same construction as the SIMD variant.
 */
static void GeneratePasswordHashUsingSHA1(int idx, unsigned char final[SHA1_LOOP_CNT][20])
{
	unsigned char hashBuf[20], *key;
	UTF16 *passwordBuf=saved_key[idx];
	int passwordBufSize=saved_len[idx];
	/* H(0) = H(salt, password)
	 * hashBuf = SHA1Hash(salt, password);
	 * create input buffer for SHA1 from salt and unicode version of password */
	unsigned int inputBuf[(0x14 + 0x04 + 4) / sizeof(int)];	// LE32 counter + 20-byte hash (+ 4 bytes slack)
	unsigned char X1[20];
	int i;
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
	SHA1_Update(&ctx, passwordBuf, passwordBufSize);
	SHA1_Final(hashBuf, &ctx);

	/* Generate each hash in turn
	 * H(n) = H(i, H(n-1))
	 * hashBuf = SHA1Hash(i, hashBuf); */
	// Create a byte array of the integer and put at the front of the input buffer
	// 1.3.6 says that little-endian byte ordering is expected
	memcpy(&inputBuf[1], hashBuf, 20);
	for (i = 0; i < MS_OFFICE_2007_ITERATIONS; i++) {
#if ARCH_LITTLE_ENDIAN
		*inputBuf = i;
#else
		*inputBuf = JOHNSWAP(i);
#endif
		// 'append' the previously generated hash to the input buffer
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, inputBuf, 0x14 + 0x04);
		SHA1_Final((unsigned char*)&inputBuf[1], &ctx);
	}
	// Finally, append "block" (0) to H(n)
	// hashBuf = SHA1Hash(hashBuf, 0);
	memset(&inputBuf[6], 0, 4);
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, &inputBuf[1], 0x14 + 0x04);
	SHA1_Final(hashBuf, &ctx);

	key = DeriveKey(hashBuf, X1);

	// Should handle the case of longer key lengths as shown in 2.3.4.9
	// Grab the key length bytes of the final hash as the encryption key
	memcpy(final[0], key, cur_salt->keySize/8);
}
#endif
#ifdef SIMD_COEF_32
/*
 * SIMD version: Office 2010 "agile" key derivation for SHA1_LOOP_CNT
 * candidates starting at 'idx'.  For each candidate, produces two 20-byte
 * hashes at hashBuf[i][0] and hashBuf[i][32]: the spinCount-iterated SHA-1
 * with each of the two fixed block keys appended.
 */
static void GenerateAgileEncryptionKey(int idx, unsigned char hashBuf[SHA1_LOOP_CNT][64])
{
	unsigned char tmpBuf[20];
	int hashSize = cur_salt->keySize >> 3;
	unsigned i, j;
	SHA_CTX ctx;
	unsigned char _IBuf[64*SHA1_LOOP_CNT+MEM_ALIGN_CACHE], *keys,
	              _OBuf[20*SHA1_LOOP_CNT+MEM_ALIGN_CACHE];
	uint32_t *keys32, (*crypt)[20/4];

	// Cache-aligned SIMD input/output buffers
	crypt = (void*)mem_align(_OBuf, MEM_ALIGN_CACHE);
	keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
	keys32 = (uint32_t*)keys;
	memset(keys, 0, 64*SHA1_LOOP_CNT);

	for (i = 0; i < SHA1_LOOP_CNT; ++i) {
		// H(0) = SHA1(salt . password), scalar SHA-1
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
		SHA1_Update(&ctx, saved_key[idx+i], saved_len[idx+i]);
		SHA1_Final(tmpBuf, &ctx);
		// Hash placed at offset 4, behind the little-endian round counter
		for (j = 4; j < 24; ++j)
			keys[GETPOS_1(j, i)] = tmpBuf[j-4];
		keys[GETPOS_1(j, i)] = 0x80;	// SHA-1 padding bit (j == 24 here)
		// 24 bytes of crypt data (192 bits).
		keys[GETPOS_1(63, i)] = 192;
	}
	// we do 1 less than actual number of iterations here.
	for (i = 0; i < cur_salt->spinCount-1; i++) {
		for (j = 0; j < SHA1_LOOP_CNT; ++j) {
			// spinCount (typically 100000) needs 3 counter bytes
			keys[GETPOS_1(0, j)] = i&0xff;
			keys[GETPOS_1(1, j)] = (i>>8)&0xff;
			keys[GETPOS_1(2, j)] = i>>16;
		}
		// Here we output to 4 bytes past start of input buffer.
		SIMDSHA1body(keys, &keys32[SIMD_COEF_32], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
	}
	// last iteration is output to start of input buffer, then 32 bit 0 appended.
	// but this still ends up being 24 bytes of crypt data.
	for (j = 0; j < SHA1_LOOP_CNT; ++j) {
		keys[GETPOS_1(0, j)] = i&0xff;
		keys[GETPOS_1(1, j)] = (i>>8)&0xff;
		keys[GETPOS_1(2, j)] = i>>16;
	}
	SIMDSHA1body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
	// Finally, append first "block" key to H(n)
	for (i = 0; i < SHA1_LOOP_CNT; ++i) {
		for (j = 0; j < 8; ++j)
			keys[GETPOS_1(20+j, i)] = encryptedVerifierHashInputBlockKey[j];
		keys[GETPOS_1(20+j, i)] = 0x80;
		// 28 bytes of crypt data (224 bits).
		keys[GETPOS_1(63, i)] = 224;
	}
	SIMDSHA1body(keys, (uint32_t*)crypt, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
	for (i = 0; i < SHA1_LOOP_CNT; ++i)
		memcpy(hashBuf[i], crypt[i], 20);
	// And second "block" key to H(n) (input buffer otherwise unchanged)
	for (i = 0; i < SHA1_LOOP_CNT; ++i) {
		for (j = 0; j < 8; ++j)
			keys[GETPOS_1(20+j, i)] = encryptedVerifierHashValueBlockKey[j];
	}
	SIMDSHA1body(keys, (uint32_t*)crypt, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
	for (i = 0; i < SHA1_LOOP_CNT; ++i)
		memcpy(&hashBuf[i][32], crypt[i], 20);
	// Fix up the size per the spec
	if (20 < hashSize) { // FIXME: Is this ever true?
		for (i = 0; i < SHA1_LOOP_CNT; ++i) {
			for (j = 20; j < hashSize; j++) {
				hashBuf[i][j] = 0x36;
				hashBuf[i][32 + j] = 0x36;
			}
		}
	}
}
#else
/*
 * Scalar version: Office 2010 "agile" key derivation for one candidate.
 * Fills hashBuf[0][0..19] and hashBuf[0][32..51] with the two block-key
 * hashes.
 */
static void GenerateAgileEncryptionKey(int idx, unsigned char hashBuf[SHA1_LOOP_CNT][64])
{
	/* H(0) = H(salt, password)
	 * hashBuf = SHA1Hash(salt, password);
	 * create input buffer for SHA1 from salt and unicode version of password */
	UTF16 *passwordBuf=saved_key[idx];
	int passwordBufSize=saved_len[idx];
	int hashSize = cur_salt->keySize >> 3;
	unsigned int inputBuf[(28 + 4) / sizeof(int)];	// LE32 counter + 20-byte hash + 8-byte block key
	unsigned int i;
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
	SHA1_Update(&ctx, passwordBuf, passwordBufSize);
	SHA1_Final(hashBuf[0], &ctx);

	/* Generate each hash in turn
	 * H(n) = H(i, H(n-1))
	 * hashBuf = SHA1Hash(i, hashBuf); */
	// Create a byte array of the integer and put at the front of the input buffer
	// 1.3.6 says that little-endian byte ordering is expected
	memcpy(&inputBuf[1], hashBuf[0], 20);
	for (i = 0; i < cur_salt->spinCount; i++) {
#if ARCH_LITTLE_ENDIAN
		*inputBuf = i;
#else
		*inputBuf = JOHNSWAP(i);
#endif
		// 'append' the previously generated hash to the input buffer
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, inputBuf, 0x14 + 0x04);
		SHA1_Final((unsigned char*)&inputBuf[1], &ctx);
	}
	// Finally, append first "block" key to H(n) -> hashBuf[0][0]
	memcpy(&inputBuf[6], encryptedVerifierHashInputBlockKey, 8);
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, &inputBuf[1], 28);
	SHA1_Final(hashBuf[0], &ctx);
	// And second "block" key to H(n) -> hashBuf[0][32]
	memcpy(&inputBuf[6], encryptedVerifierHashValueBlockKey, 8);
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, &inputBuf[1], 28);
	SHA1_Final(&hashBuf[0][32], &ctx);
	// Fix up the size per the spec
	if (20 < hashSize) { // FIXME: Is this ever true?
		for (i = 20; i < hashSize; i++) {
			hashBuf[0][i] = 0x36;
			hashBuf[0][32 + i] = 0x36;
		}
	}
}
#endif
#ifdef SIMD_COEF_64
/*
 * SIMD version: Office 2013 "agile" key derivation (SHA-512) for
 * SHA512_LOOP_CNT candidates starting at 'idx'.  For each candidate, fills
 * hashBuf[i][0..63] and hashBuf[i][64..127] with the two block-key hashes.
 */
static void GenerateAgileEncryptionKey512(int idx, unsigned char hashBuf[SHA512_LOOP_CNT][128])
{
	unsigned char tmpBuf[64];
	unsigned int i, j, k;
	SHA512_CTX ctx;
	unsigned char _IBuf[128*SHA512_LOOP_CNT+MEM_ALIGN_CACHE], *keys,
	              _OBuf[64*SHA512_LOOP_CNT+MEM_ALIGN_CACHE];
	uint64_t *keys64, (*crypt)[64/8];
	uint32_t *keys32, *crypt32;

	// Cache-aligned SIMD input/output buffers
	crypt = (void*)mem_align(_OBuf, MEM_ALIGN_CACHE);
	keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
	keys64 = (uint64_t*)keys;
	keys32 = (uint32_t*)keys;
	crypt32 = (uint32_t*)crypt;
	memset(keys, 0, 128*SHA512_LOOP_CNT);

	for (i = 0; i < SHA512_LOOP_CNT; ++i) {
		// H(0) = SHA512(salt . password), scalar SHA-512
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
		SHA512_Update(&ctx, saved_key[idx+i], saved_len[idx+i]);
		SHA512_Final(tmpBuf, &ctx);
		// Hash placed at offset 4, behind the little-endian round counter
		for (j = 4; j < 68; ++j)
			keys[GETPOS_512(j, i)] = tmpBuf[j-4];
		keys[GETPOS_512(j, i)] = 0x80;	// SHA-512 padding bit (j == 68 here)
		// 68 bytes of crypt data (0x220 bits).
		keys[GETPOS_512(127, i)] = 0x20;
		keys[GETPOS_512(126, i)] = 0x02;
	}
	// we do 1 less than actual number of iterations here.
	for (i = 0; i < cur_salt->spinCount-1; i++) {
		// Iteration counter in first 4 bytes
		for (j = 0; j < SHA512_LOOP_CNT; j++) {
			keys[GETPOS_512(0, j)] = i & 0xFF;
			keys[GETPOS_512(1, j)] = (i>>8) & 0xFF;
			keys[GETPOS_512(2, j)] = (i>>16) & 0xFF;
			keys[GETPOS_512(3, j)] = (i>>24) & 0xFF;
		}
		SIMDSHA512body(keys, (uint64_t*)crypt, NULL, SSEi_MIXED_IN);
		// Then we output to 4 bytes past start of input buffer.
		/* Original code to copy in 64 bytes into offset 4. Not BE compatible.
		for (j = 0; j < SHA512_LOOP_CNT; j++) {
			uint32_t *o = keys32 + (j&(SIMD_COEF_64-1))*2 + j/SIMD_COEF_64*2*SHA_BUF_SIZ*SIMD_COEF_64;
			uint32_t *in = crypt32 + (j&(SIMD_COEF_64-1))*2 + j/SIMD_COEF_64*2*8*SIMD_COEF_64;
			for (k = 0; k < 8; k++) {
				o[0] = in[1];
				o += SIMD_COEF_64*2;
				o[1] = in[0];
				in += SIMD_COEF_64*2;
			}
		}
		*/
		/* First shot: works good, not endianity bound, but is SLOWER (1/2 speed)
		for (j = 0; j < SHA512_LOOP_CNT; j++) {
			for (k = 0; k < 64; k++) {
				keys[GETPOS_512((k+4), j)] = ((unsigned char*)crypt)[GETOUTPOS_512(k,j)];
			}
		}
		*/
		// tweaked original code, swapping uint32_t and this works.
		// it is very likely this code could be optimized even more, by handling data
		// in uint64_t items. First and last would still need handled in uint32, but
		// other 7 elements could be done by reading 2 8 byte values from crypt, shifting
		// and then placing at one time into input buffer. I might look into doing that
		// and see if there is any improvement. It may also be beneficial to look at using
		// flat buffers here. Flat buffers would be trivial. a simple memcpy to move all
		// 64 bytes at once. NOTE, in flat model, there is NO way to do this using any
		// 64 bit assignments. Either the input buffer, or the crypt buffer would not be
		// properly aligned. So memcpy would have to be used. BUT it should be trivial
		// and may in the end be a faster solution, than keeping this code in mixed form.
		// but for now, it will be left as a task for someone else.
		for (j = 0; j < SHA512_LOOP_CNT; j++) {
			uint32_t *o = keys32 + (j&(SIMD_COEF_64-1))*2 + j/SIMD_COEF_64*2*SHA_BUF_SIZ*SIMD_COEF_64;
			uint32_t *in = crypt32 + (j&(SIMD_COEF_64-1))*2 + j/SIMD_COEF_64*2*8*SIMD_COEF_64;
			for (k = 0; k < 8; k++) {
#if ARCH_LITTLE_ENDIAN==1
				o[0] = in[1];
				o += SIMD_COEF_64*2;
				o[1] = in[0];
				in += SIMD_COEF_64*2;
#else
				o[1] = in[0];
				o += SIMD_COEF_64*2;
				o[0] = in[1];
				in += SIMD_COEF_64*2;
#endif
			}
		}
	}
	// last iteration is output to start of input buffer, then 32 bit 0 appended.
	// but this still ends up being 24 bytes of crypt data.
	for (j = 0; j < SHA512_LOOP_CNT; ++j) {
		keys[GETPOS_512(0, j)] = i&0xff;
		keys[GETPOS_512(1, j)] = (i>>8)&0xff;
		keys[GETPOS_512(2, j)] = i>>16;
	}
	SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
	// Finally, append first "block" key to H(n)
	for (i = 0; i < SHA512_LOOP_CNT; ++i) {
		for (j = 0; j < 8; ++j)
			keys[GETPOS_512(64+j, i)] = encryptedVerifierHashInputBlockKey[j];
		keys[GETPOS_512(64+j, i)] = 0x80;	// padding bit (j == 8 here)
		// 72 bytes of crypt data (0x240 we already have 0x220 here)
		keys[GETPOS_512(127, i)] = 0x40;
	}
	SIMDSHA512body(keys, (uint64_t*)crypt, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
	for (i = 0; i < SHA512_LOOP_CNT; ++i)
		memcpy((uint64_t*)(hashBuf[i]), crypt[i], 64);
	// And second "block" key to H(n)
	for (i = 0; i < SHA512_LOOP_CNT; ++i) {
		for (j = 0; j < 8; ++j)
			keys[GETPOS_512(64+j, i)] = encryptedVerifierHashValueBlockKey[j];
	}
	SIMDSHA512body(keys, (uint64_t*)crypt, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
	for (i = 0; i < SHA512_LOOP_CNT; ++i)
		memcpy((uint64_t*)(&hashBuf[i][64]), crypt[i], 64);
}
#else
/*
 * Scalar version: Office 2013 "agile" key derivation (SHA-512) for one
 * candidate.  Fills hashBuf[0][0..63] and hashBuf[0][64..127] with the two
 * block-key hashes.
 */
static void GenerateAgileEncryptionKey512(int idx, unsigned char hashBuf[SHA512_LOOP_CNT][128])
{
	UTF16 *passwordBuf=saved_key[idx];
	int passwordBufSize=saved_len[idx];
	unsigned int inputBuf[128 / sizeof(int)];	// LE32 counter + 64-byte hash + 8-byte block key
	int i;
	SHA512_CTX ctx;

	// H(0) = SHA512(salt . password)
	SHA512_Init(&ctx);
	SHA512_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
	SHA512_Update(&ctx, passwordBuf, passwordBufSize);
	SHA512_Final(hashBuf[0], &ctx);

	// Create a byte array of the integer and put at the front of the input buffer
	// 1.3.6 says that little-endian byte ordering is expected
	memcpy(&inputBuf[1], hashBuf, 64);
	for (i = 0; i < cur_salt->spinCount; i++) {
#if ARCH_LITTLE_ENDIAN
		*inputBuf = i;
#else
		*inputBuf = JOHNSWAP(i);
#endif
		// 'append' the previously generated hash to the input buffer
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, inputBuf, 64 + 0x04);
		SHA512_Final((unsigned char*)&inputBuf[1], &ctx);
	}
	// Finally, append first "block" key to H(n) -> hashBuf[0][0]
	memcpy(&inputBuf[68/4], encryptedVerifierHashInputBlockKey, 8);
	SHA512_Init(&ctx);
	SHA512_Update(&ctx, &inputBuf[1], 64 + 8);
	SHA512_Final(hashBuf[0], &ctx);
	// And second "block" key to H(n) -> hashBuf[0][64]
	memcpy(&inputBuf[68/4], encryptedVerifierHashValueBlockKey, 8);
	SHA512_Init(&ctx);
	SHA512_Update(&ctx, &inputBuf[1], 64 + 8);
	SHA512_Final(&hashBuf[0][64], &ctx);
}
#endif
/*
 * Allocate the per-candidate buffers and, with OpenMP, scale the key counts
 * by thread count (and OMP_SCALE).  With UTF-8 input the effective plaintext
 * length is raised to PLAINTEXT_LENGTH * 3 but capped at 125 bytes.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	saved_len = mem_calloc(sizeof(*saved_len), self->params.max_keys_per_crypt);
	crypt_key = mem_calloc(sizeof(*crypt_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	if (options.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, PLAINTEXT_LENGTH * 3);
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(crypt_key);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}
/* Select the salt used by subsequent crypt_all()/cmp_*() calls. */
static void set_salt(void *salt)
{
	cur_salt = (ms_office_custom_salt *)salt;
}
/*
 * AES-CBC-decrypt 'length' bytes of 'encryptedVerifier' into
 * 'decryptedVerifier' using 'verifierInputKey' (keySize bits) and an IV
 * built from the first 16 bytes of the salt, zero-padded.
 *
 * Fix: the output parameter was declared 'const unsigned char *' and the
 * const was cast away at the AES_cbc_encrypt() call — undefined behavior if
 * a caller ever passed genuinely const storage, and a misleading contract.
 * The parameter is now plainly non-const; all callers pass writable arrays,
 * so this is source-compatible.
 *
 * Note: the 'cur_salt' parameter intentionally shadows the file-scope
 * global of the same name.
 */
static void DecryptUsingSymmetricKeyAlgorithm(ms_office_custom_salt *cur_salt, unsigned char *verifierInputKey, unsigned char *encryptedVerifier, unsigned char *decryptedVerifier, int length)
{
	unsigned char iv[32];
	AES_KEY akey;

	/* IV = first 16 bytes of the salt, zero-padded to 32 bytes */
	memcpy(iv, cur_salt->osalt, 16);
	memset(&iv[16], 0, 16);
	memset(&akey, 0, sizeof(AES_KEY));
	AES_set_decrypt_key(verifierInputKey, cur_salt->keySize, &akey);
	AES_cbc_encrypt(encryptedVerifier, decryptedVerifier, length, &akey, iv, AES_DECRYPT);
}
// We now pass in the 16 byte 'output'. The older code has been kept, but
// it no longer used that way. We used to return the 'cracked' value, i.e.
// if it matched, return 1, else 0. Now we store the encryption data to out,
// and then in the format use normal binary_hash() methods to test it. The
// old method used decryption (of the encrypted field). Now we use encryption
// of the plaintext data, and then binary_hash() compares that to the known
// encrypted field data.
// For the time being, the original code has been kept (commented out). I am
// doing this in hopes of figuring out some way to salt-dupe correct the
// office 2010-2013 formats. I do not think they can be done, but I may be
// wrong, so I will keep this code in an "easy to see what changed" layout.
/*
 * Office 2007 check: decrypt the stored verifier with the candidate key,
 * then AES-encrypt SHA1(decrypted verifier) and store its first 16 bytes
 * to 'out' — the comparable value checked later by binary_hash()/cmp_*.
 * (See the long comment above for why this replaced the old boolean-return
 * decrypt-and-compare approach, which is kept commented out below.)
 */
static void PasswordVerifier(ms_office_custom_salt *cur_salt, unsigned char *key, uint32_t *out)
{
	unsigned char decryptedVerifier[16];
	//unsigned char decryptedVerifierHash[16];
	AES_KEY akey;
	SHA_CTX ctx;
	unsigned char checkHash[32];
	unsigned char checkHashed[32];

	memset(&akey, 0, sizeof(AES_KEY));
	AES_set_decrypt_key(key, 128, &akey);
	AES_ecb_encrypt(cur_salt->encryptedVerifier, decryptedVerifier, &akey, AES_DECRYPT);

	// Not using cracked any more.
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, decryptedVerifier, 16);
	SHA1_Final(checkHash, &ctx);
	memset(&akey, 0, sizeof(AES_KEY));
	AES_set_encrypt_key(key, 128, &akey);
	AES_ecb_encrypt(checkHash, checkHashed, &akey, AES_ENCRYPT);
	memcpy(out, checkHashed, 16);

	//AES_set_decrypt_key(key, 128, &akey);
	//AES_ecb_encrypt(cur_salt->encryptedVerifierHash, decryptedVerifierHash, &akey, AES_DECRYPT);
	//
	///* find SHA1 hash of decryptedVerifier */
	//SHA1_Init(&ctx);
	//SHA1_Update(&ctx, decryptedVerifier, 16);
	//SHA1_Final(checkHash, &ctx);
	//
	//return !memcmp(checkHash, decryptedVerifierHash, 16);
}
/*
 * Main crypt loop.  For each batch of candidates: 2007 stores a comparable
 * 16-byte value into crypt_key[] (checked later via binary_hash()/cmp_*);
 * 2010/2013 decrypt the verifier pair and set cracked[] directly.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0, inc = SHA1_LOOP_CNT;

	// 2013 uses SHA-512 SIMD lanes, so its batch width differs
	if (cur_salt->version == 2013)
		inc = SHA512_LOOP_CNT;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index+=inc)
	{
		int i;
		if (cur_salt->version == 2007) {
			unsigned char encryptionKey[SHA1_LOOP_CNT][20];
			GeneratePasswordHashUsingSHA1(index, encryptionKey);
			for (i = 0; i < SHA1_LOOP_CNT; ++i)
				PasswordVerifier(cur_salt, encryptionKey[i], crypt_key[index+i]);
		}
		else if (cur_salt->version == 2010) {
			// Agile scheme: match SHA1(decrypted verifier input) against
			// the first 20 bytes of the decrypted verifier hash
			unsigned char verifierKeys[SHA1_LOOP_CNT][64], decryptedVerifierHashInputBytes[16], decryptedVerifierHashBytes[32];
			unsigned char hash[20];
			SHA_CTX ctx;
			GenerateAgileEncryptionKey(index, verifierKeys);
			for (i = 0; i < inc; ++i) {
				DecryptUsingSymmetricKeyAlgorithm(cur_salt, verifierKeys[i], cur_salt->encryptedVerifier, decryptedVerifierHashInputBytes, 16);
				DecryptUsingSymmetricKeyAlgorithm(cur_salt, &verifierKeys[i][32], cur_salt->encryptedVerifierHash, decryptedVerifierHashBytes, 32);
				SHA1_Init(&ctx);
				SHA1_Update(&ctx, decryptedVerifierHashInputBytes, 16);
				SHA1_Final(hash, &ctx);
				cracked[index+i] = !memcmp(hash, decryptedVerifierHashBytes, 20);
			}
		}
		else if (cur_salt->version == 2013) {
			// Same scheme with SHA-512; only 20 bytes are compared here too
			unsigned char verifierKeys[SHA512_LOOP_CNT][128], decryptedVerifierHashInputBytes[16], decryptedVerifierHashBytes[32];
			unsigned char hash[64];
			SHA512_CTX ctx;
			GenerateAgileEncryptionKey512(index, verifierKeys);
			for (i = 0; i < inc; ++i) {
				DecryptUsingSymmetricKeyAlgorithm(cur_salt, verifierKeys[i], cur_salt->encryptedVerifier, decryptedVerifierHashInputBytes, 16);
				DecryptUsingSymmetricKeyAlgorithm(cur_salt, &verifierKeys[i][64], cur_salt->encryptedVerifierHash, decryptedVerifierHashBytes, 32);
				SHA512_Init(&ctx);
				SHA512_Update(&ctx, decryptedVerifierHashInputBytes, 16);
				SHA512_Final(hash, &ctx);
				cracked[index+i] = !memcmp(hash, decryptedVerifierHashBytes, 20);
			}
		}
	}
	return count;
}
/*
 * Quick scan over the batch: for 2007, compare the first 32 bits of the
 * stored binary against each candidate's crypt_key[]; for 2010/2013, just
 * look for any set cracked[] flag.
 */
static int cmp_all(void *binary, int count)
{
	const int is_2007 = (cur_salt->version == 2007);
	const uint32_t probe = is_2007 ? ((uint32_t*)binary)[0] : 0;
	int i;

	for (i = 0; i < count; i++) {
		if (is_2007 ? (crypt_key[i][0] == probe) : (cracked[i] != 0))
			return 1;
	}
	return 0;
}
/*
 * Full per-candidate check: 2007 compares the whole 16-byte binary;
 * 2010/2013 were already fully decided in crypt_all().
 */
static int cmp_one(void *binary, int index)
{
	if (cur_salt->version != 2007)
		return cracked[index];
	return memcmp(binary, crypt_key[index], BINARY_SIZE) == 0;
}
/* Nothing further to verify beyond cmp_one(). */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Partial-hash accessors for the cracker core.  Only the 2007 format has a
 * comparable crypt_key[]; for 2010/2013 these degenerate to 0. */
static int get_hash_0(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_6; }
/* Store a candidate password as UTF-16LE (UCS-2) plus its length in octets. */
static void office_set_key(char *key, int index)
{
	/* convert key to UTF-16LE */
	saved_len[index] = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
	/* negative return from enc_to_utf16() — presumably signals truncation;
	   fall back to the length of what was actually converted (TODO confirm
	   against unicode.c) */
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(saved_key[index]);
	/* length is kept in octets, not UTF-16 code units */
	saved_len[index] <<= 1;
}
/* Return the stored candidate converted back to the target encoding. */
static char *get_key(int index)
{
	return (char*)utf16_to_enc(saved_key[index]);
}
/*
 * Format descriptor registered with the JtR core: static parameters and
 * self-tests first, then the method table.
 */
struct fmt_main fmt_office = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8,
		{
			/* tunable-cost names reported to the user */
			"MS Office version",
			"iteration count",
		},
		{ FORMAT_TAG_OFFICE },
		office_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		/* parsing/salt handling shared with the other Office formats */
		ms_office_common_valid,
		fmt_default_split,
		ms_office_common_binary,
		ms_office_common_get_salt,
		{
			/* tunable-cost value extractors, matching the names above */
			ms_office_common_version,
			ms_office_common_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		office_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
base_mortar_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_BASE_MORTAR_CRITERIA_H)
#define KRATOS_BASE_MORTAR_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "custom_utilities/contact_utilities.h"
#include "utilities/mortar_utilities.h"
#include "utilities/variable_utils.h"
#include "custom_processes/aalm_adapt_penalty_value_process.h"
#include "custom_processes/compute_dynamic_factor_process.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
// DEBUG
#include "includes/gid_io.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class BaseMortarConvergenceCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Custom convergence criteria for the mortar condition
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class BaseMortarConvergenceCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of BaseMortarConvergenceCriteria
KRATOS_CLASS_POINTER_DEFINITION( BaseMortarConvergenceCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( COMPUTE_DYNAMIC_FACTOR );
KRATOS_DEFINE_LOCAL_FLAG( IO_DEBUG );
KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The components containers
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef GidIO<> GidIOBaseType;
///@}
///@name Life Cycle
///@{
    /// Default constructor
    /**
     * @param ComputeDynamicFactor Sets the COMPUTE_DYNAMIC_FACTOR local flag
     * @param IODebug Sets the IO_DEBUG local flag; when true, a GiD IO
     *        ("POST_LINEAR_ITER", binary, single file) is created for
     *        per-iteration debug output
     * @param PureSlip Sets the PURE_SLIP local flag (frictional problems)
     */
    explicit BaseMortarConvergenceCriteria(
        const bool ComputeDynamicFactor = false,
        const bool IODebug = false,
        const bool PureSlip = false
        )
        : ConvergenceCriteria< TSparseSpace, TDenseSpace >(),
          mpIO(nullptr)
    {
        // Set local flags
        mOptions.Set(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR, ComputeDynamicFactor);
        mOptions.Set(BaseMortarConvergenceCriteria::IO_DEBUG, IODebug);
        mOptions.Set(BaseMortarConvergenceCriteria::PURE_SLIP, PureSlip);

        // Only instantiate the (costly) debug IO when requested
        if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
            mpIO = Kratos::make_shared<GidIOBaseType>("POST_LINEAR_ITER", GiD_PostBinary, SingleFile, WriteUndeformed, WriteElementsOnly);
        }
    }
    /// Copy constructor (the debug IO pointer is shared, not cloned)
    BaseMortarConvergenceCriteria( BaseMortarConvergenceCriteria const& rOther )
        :BaseType(rOther),
         mOptions(rOther.mOptions),
         mpIO(rOther.mpIO)
    {
    }
    /// Destructor
    ~BaseMortarConvergenceCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Criterias that need to be called before getting the solution
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PreCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// The current process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// The contact model part
ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
// We update the normals if necessary
const auto normal_variation = r_process_info.Has(CONSIDER_NORMAL_VARIATION) ? static_cast<NormalDerivativesComputation>(r_process_info.GetValue(CONSIDER_NORMAL_VARIATION)) : NO_DERIVATIVES_COMPUTATION;
if (normal_variation != NO_DERIVATIVES_COMPUTATION) {
ComputeNodesMeanNormalModelPartWithPairedNormal(rModelPart); // Update normal of the conditions
}
// Update tangent (must be updated even for constant normal)
const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false;
if (frictional_problem) {
const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER);
if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) {
MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part);
} else {
MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true);
}
}
const bool adapt_penalty = r_process_info.Has(ADAPT_PENALTY) ? r_process_info.GetValue(ADAPT_PENALTY) : false;
const bool dynamic_case = rModelPart.HasNodalSolutionStepVariable(VELOCITY);
/* Compute weighthed gap */
if (adapt_penalty || dynamic_case) {
// Set to zero the weighted gap
ResetWeightedGap(rModelPart);
// Compute the contribution
ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact"));
}
// In dynamic case
if ( dynamic_case && mOptions.Is(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR)) {
ComputeDynamicFactorProcess compute_dynamic_factor_process( r_contact_model_part );
compute_dynamic_factor_process.Execute();
}
// We recalculate the penalty parameter
if ( adapt_penalty ) {
AALMAdaptPenaltyValueProcess aalm_adaptation_of_penalty( r_contact_model_part );
aalm_adaptation_of_penalty.Execute();
}
return true;
}
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// We save the current WEIGHTED_GAP in the buffer
NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();
const auto it_node_begin = r_nodes_array.begin();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
auto it_node = it_node_begin + i;
it_node->FastGetSolutionStepValue(WEIGHTED_GAP, 1) = it_node->FastGetSolutionStepValue(WEIGHTED_GAP);
}
// Set to zero the weighted gap
ResetWeightedGap(rModelPart);
// Compute the contribution
ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact"));
// GiD IO for debugging
if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false;
const int nl_iter = rModelPart.GetProcessInfo()[NL_ITERATION_NUMBER];
const double label = static_cast<double>(nl_iter);
if (nl_iter == 1) {
mpIO->InitializeMesh(label);
mpIO->WriteMesh(rModelPart.GetMesh());
mpIO->FinalizeMesh();
mpIO->InitializeResults(label, rModelPart.GetMesh());
}
mpIO->WriteNodalFlags(INTERFACE, "INTERFACE", rModelPart.Nodes(), label);
mpIO->WriteNodalFlags(ACTIVE, "ACTIVE", rModelPart.Nodes(), label);
mpIO->WriteNodalFlags(SLAVE, "SLAVE", rModelPart.Nodes(), label);
mpIO->WriteNodalFlags(ISOLATED, "ISOLATED", rModelPart.Nodes(), label);
mpIO->WriteNodalResults(NORMAL, rModelPart.Nodes(), label, 0);
mpIO->WriteNodalResultsNonHistorical(DYNAMIC_FACTOR, rModelPart.Nodes(), label);
mpIO->WriteNodalResultsNonHistorical(AUGMENTED_NORMAL_CONTACT_PRESSURE, rModelPart.Nodes(), label);
mpIO->WriteNodalResults(DISPLACEMENT, rModelPart.Nodes(), label, 0);
if (rModelPart.Nodes().begin()->SolutionStepsDataHas(VELOCITY_X)) {
mpIO->WriteNodalResults(VELOCITY, rModelPart.Nodes(), label, 0);
mpIO->WriteNodalResults(ACCELERATION, rModelPart.Nodes(), label, 0);
}
if (r_nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE))
mpIO->WriteNodalResults(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE, rModelPart.Nodes(), label, 0);
else if (r_nodes_array.begin()->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X))
mpIO->WriteNodalResults(VECTOR_LAGRANGE_MULTIPLIER, rModelPart.Nodes(), label, 0);
mpIO->WriteNodalResults(WEIGHTED_GAP, rModelPart.Nodes(), label, 0);
if (frictional_problem) {
mpIO->WriteNodalFlags(SLIP, "SLIP", rModelPart.Nodes(), label);
mpIO->WriteNodalResults(WEIGHTED_SLIP, rModelPart.Nodes(), label, 0);
mpIO->WriteNodalResultsNonHistorical(AUGMENTED_TANGENT_CONTACT_PRESSURE, rModelPart.Nodes(), label);
}
}
return true;
}
/**
* @brief This function initialize the convergence criteria
* @param rModelPart The model part of interest
*/
void Initialize(ModelPart& rModelPart) override
{
// Calling base criteria
BaseType::Initialize(rModelPart);
// The current process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Update normal of the conditions
ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
MortarUtilities::ComputeNodesMeanNormalModelPart(r_contact_model_part);
const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false;
if (frictional_problem) {
const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER);
if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) {
MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part);
} else {
MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true);
}
}
// IO for debugging
if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
mpIO->CloseResultFile();
std::ostringstream new_name ;
new_name << "POST_LINEAR_ITER_STEP=""POST_LINEAR_ITER_STEP=" << rModelPart.GetProcessInfo()[STEP];
mpIO->ChangeOutputName(new_name.str());
}
}
/**
* @brief This function finalizes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void FinalizeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// IO for debugging
if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
mpIO->FinalizeResults();
}
}
/**
* @brief This function finalizes the non-linear iteration
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual + reactions)
*/
void FinalizeNonLinearIteration(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Calling base criteria
BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb);
// The current process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
}
///@}
///@name Operations
///@{
///@}
///@name Acces
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
Flags mOptions; /// Local flags
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method resets the weighted gap in the nodes of the problem
* @param rModelPart Reference to the ModelPart containing the contact problem.
*/
virtual void ResetWeightedGap(ModelPart& rModelPart)
{
NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();
VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, r_nodes_array);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
GidIOBaseType::Pointer mpIO; /// The pointer to the debugging IO
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief It computes the mean of the normal in the condition in all the nodes
* @param rModelPart The model part to compute
*/
inline void ComputeNodesMeanNormalModelPartWithPairedNormal(ModelPart& rModelPart)
{
// Compute normal and tangent
ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
MortarUtilities::ComputeNodesMeanNormalModelPart(r_contact_model_part);
// Iterate over the computing conditions
ModelPart& r_computing_contact_model_part = rModelPart.GetSubModelPart("ComputingContact");
ConditionsArrayType& r_conditions_array = r_computing_contact_model_part.Conditions();
const auto it_cond_begin = r_conditions_array.begin();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
auto it_cond = it_cond_begin + i;
// Aux coordinates
Point::CoordinatesArrayType aux_coords;
// We update the paired normal
GeometryType& r_parent_geometry = it_cond->GetGeometry().GetGeometryPart(0);
aux_coords = r_parent_geometry.PointLocalCoordinates(aux_coords, r_parent_geometry.Center());
it_cond->SetValue(NORMAL, r_parent_geometry.UnitNormal(aux_coords));
}
}
///@}
///@name Private Access
///@{
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Class BaseMortarConvergenceCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the local flags declared with KRATOS_DEFINE_LOCAL_FLAG.
// Each flag occupies one bit position (0..2); the NOT_* counterpart is the same
// position created with value=false, as required by the Kratos Flags convention.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::COMPUTE_DYNAMIC_FACTOR(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::NOT_COMPUTE_DYNAMIC_FACTOR(Kratos::Flags::Create(0, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::IO_DEBUG(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::NOT_IO_DEBUG(Kratos::Flags::Create(1, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::NOT_PURE_SLIP(Kratos::Flags::Create(2, false));
} // namespace Kratos
#endif /* KRATOS_BASE_MORTAR_CRITERIA_H defined */
|
core_slansy.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlansy.c, normal z -> s, Fri Sep 28 17:38:21 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
/******************************************************************************/
// Sequential kernel: norm of a real symmetric matrix.
// Delegates directly to the LAPACKE workspace variant; the result is written
// through *value. Declared weak so an optimized build can override it.
__attribute__((weak))
void plasma_core_slansy(plasma_enum_t norm, plasma_enum_t uplo,
                        int n,
                        const float *A, int lda,
                        float *work, float *value)
{
    const float result = LAPACKE_slansy_work(LAPACK_COL_MAJOR,
                                             lapack_const(norm),
                                             lapack_const(uplo),
                                             n, A, lda, work);
    *value = result;
}
/******************************************************************************/
// Asynchronous wrapper: spawns the norm computation as an OpenMP task.
// The task reads the whole tile A and writes the scalar *value; it becomes a
// no-op when an earlier task in the sequence has already failed.
void plasma_core_omp_slansy(plasma_enum_t norm, plasma_enum_t uplo,
                            int n,
                            const float *A, int lda,
                            float *work, float *value,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:value[0:1])
    {
        if (sequence->status == PlasmaSuccess) {
            plasma_core_slansy(norm, uplo, n, A, lda, work, value);
        }
    }
}
/******************************************************************************/
// Auxiliary kernel for one-/infinity-norm of a symmetric tile: accumulates
// per-column sums of absolute values into value[0..n-1], touching only the
// stored triangle and mirroring each off-diagonal entry to both rows.
// Norms other than one/inf are intentionally a no-op here.
void plasma_core_omp_slansy_aux(plasma_enum_t norm, plasma_enum_t uplo,
                                int n,
                                const float *A, int lda,
                                float *value,
                                plasma_sequence_t *sequence, plasma_request_t *request)
{
    switch (norm) {
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                // Zero the accumulators once, regardless of triangle.
                for (int col = 0; col < n; col++)
                    value[col] = 0.0;

                if (uplo == PlasmaUpper) {
                    for (int col = 0; col < n; col++) {
                        for (int row = 0; row < col; row++) {
                            // Off-diagonal entry counts for its row and column.
                            float a = fabsf(A[lda*col+row]);
                            value[row] += a;
                            value[col] += a;
                        }
                        value[col] += fabsf(A[lda*col+col]);
                    }
                }
                else { // PlasmaLower
                    for (int col = 0; col < n; col++) {
                        value[col] += fabsf(A[lda*col+col]);
                        for (int row = col+1; row < n; row++) {
                            float a = fabsf(A[lda*col+row]);
                            value[row] += a;
                            value[col] += a;
                        }
                    }
                }
            }
        }
        break;
    }
}
|
mmp.c | #include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
// XSBench driver: parses CLI options, builds (or loads) the simulation data,
// runs the selected cross-section lookup kernel, and reports/validates results.
// Returns nonzero when the verification checksum does not match.
int main( int argc, char* argv[] )
{
    // ---------------------------------------------------------------
    // Initialization & command-line read-in
    // ---------------------------------------------------------------
    const int version = 19;
    int mype = 0;
    int nprocs = 1;

#ifdef MPI
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#endif

    // Process CLI fields -- stored in the "Inputs" structure
    Inputs in = read_CLI( argc, argv );

    // Set number of OpenMP threads
#ifdef OPENMP
    omp_set_num_threads(in.nthreads);
#endif

    // Print-out of input summary (rank 0 only)
    if( mype == 0 )
        print_inputs( in, nprocs, version );

    // ---------------------------------------------------------------
    // Prepare nuclide energy grids, unionized energy grid & materials.
    // Not representative of a real MC workload -- do not profile this!
    // ---------------------------------------------------------------
    SimulationData SD;
    if( in.binary_mode == READ )
        SD = binary_read(in);              // skip init, load structures from file
    else
        SD = grid_init_do_not_profile( in, mype );

    if( in.binary_mode == WRITE && mype == 0 )
        binary_write(in, SD);              // dump structures for later runs

    // ---------------------------------------------------------------
    // Cross section (XS) parallel lookup simulation -- the section to
    // profile: a realistic continuous-energy macroscopic XS lookup kernel.
    // ---------------------------------------------------------------
    if( mype == 0 )
    {
        printf("\n");
        border_print();
        center_print("SIMULATION", 79);
        border_print();
    }

    double sim_start = get_time();

    unsigned long long verification;
    if( in.simulation_method == EVENT_BASED )
    {
        if( in.kernel_id == 0 )
            verification = run_event_based_simulation(in, SD, mype);
        else if( in.kernel_id == 1 )
            verification = run_event_based_simulation_optimization_1(in, SD, mype);
        else
        {
            printf("Error: No kernel ID %d found!\n", in.kernel_id);
            exit(1);
        }
    }
    else
        verification = run_history_based_simulation(in, SD, mype);

    if( mype == 0)
    {
        printf("\n" );
        printf("Simulation complete.\n" );
    }

    double sim_end = get_time();

    // ---------------------------------------------------------------
    // Output results & finalize
    // ---------------------------------------------------------------
    verification = verification % 999983;  // final hash step

    int is_invalid_result = print_results( in, mype, sim_end-sim_start, nprocs, verification );

#ifdef MPI
    MPI_Finalize();
#endif

    return is_invalid_result;
}
//io.c
// Prints program logo
// Prints the program logo banner plus attribution and version string.
void logo(int version)
{
	border_print();
	printf(
	"                    __   __ ___________                 _                        \n"
	"                    \\ \\ / //  ___| ___ \\               | |                       \n"
	"                     \\ V / \\ `--.| |_/ / ___ _ __   ___| |__                     \n"
	"                     /   \\  `--. \\ ___ \\/ _ \\ '_ \\ / __| '_ \\                    \n"
	"                    / /^\\ \\/\\__/ / |_/ /  __/ | | | (__| | | |                   \n"
	"                    \\/   \\/\\____/\\____/ \\___|_| |_|\\___|_| |_|                   \n\n"
	);
	border_print();
	center_print("Developed at Argonne National Laboratory", 79);
	char v[100];
	// snprintf instead of sprintf: bounded write, no overflow risk if the
	// version string ever grows.
	snprintf(v, sizeof(v), "Version: %d", version);
	center_print(v, 79);
	border_print();
}
// Prints Section titles in center of 80 char terminal
// Prints string s roughly centered in a terminal of the given width,
// followed by a newline. (Pads with (width-len)/2 + 1 leading spaces,
// matching the original loop's inclusive bound; no padding when s is
// wider than the field.)
void center_print(const char *s, int width)
{
	int pad = (width - (int) strlen(s)) / 2;
	while (pad-- >= 0)
		fputs(" ", stdout);
	fputs(s, stdout);
	fputs("\n", stdout);
}
// Prints the results banner, throughput statistics, and the verification
// checksum; compares the checksum against the known-good value for the
// selected benchmark size/method.
// Returns 0 when the checksum is valid, 1 otherwise (also the process exit code).
int print_results( Inputs in, int mype, double runtime, int nprocs,
	unsigned long long vhash )
{
	// Calculate lookups per second.
	// NOTE(review): `lookups` is an int; in.lookups * in.particles can overflow
	// for very large runs -- confirm upstream limits before enlarging inputs.
	int lookups = 0;
	if( in.simulation_method == HISTORY_BASED )
		lookups = in.lookups * in.particles;
	else if( in.simulation_method == EVENT_BASED )
		lookups = in.lookups;
	int lookups_per_sec = (int) ((double) lookups / runtime);

	// If running in MPI, reduce timing statistics and calculate average
#ifdef MPI
	int total_lookups = 0;
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Reduce(&lookups_per_sec, &total_lookups, 1, MPI_INT,
			MPI_SUM, 0, MPI_COMM_WORLD);
#endif

	int is_invalid_result = 1;

	// Print output (rank 0 only)
	if( mype == 0 )
	{
		border_print();
		center_print("RESULTS", 79);
		border_print();

		printf("Threads:     %d\n", in.nthreads);
#ifdef MPI
		printf("MPI ranks:   %d\n", nprocs);
#endif
#ifdef MPI
		printf("Total Lookups/s:            ");
		fancy_int(total_lookups);
		printf("Avg Lookups/s per MPI rank: ");
		fancy_int(total_lookups / nprocs);
#else
		printf("Runtime:     %.3lf seconds\n", runtime);
		printf("Lookups:     "); fancy_int(lookups);
		printf("Lookups/s:   ");
		fancy_int(lookups_per_sec);
#endif
	}

	// Reference checksums per simulation method and benchmark size
	unsigned long long large = 0;
	unsigned long long small = 0;
	if( in.simulation_method == EVENT_BASED )
	{
		small = 945990;
		large = 952131;
	}
	else if( in.simulation_method == HISTORY_BASED )
	{
		small = 941535;
		large = 954318;
	}
	if( strcmp(in.HM, "large") == 0 )
	{
		if( vhash == large )
			is_invalid_result = 0;
	}
	else if( strcmp(in.HM, "small") == 0 )
	{
		if( vhash == small )
			is_invalid_result = 0;
	}

	if(mype == 0 )
	{
		if( is_invalid_result )
			// BUGFIX: corrected misspelled user-facing message ("INAVALID" -> "INVALID")
			printf("Verification checksum: %llu (WARNING - INVALID CHECKSUM!)\n", vhash);
		else
			printf("Verification checksum: %llu (Valid)\n", vhash);
		border_print();
	}

	return is_invalid_result;
}
// Prints the program logo followed by a summary of all run parameters and an
// estimate of memory usage.
void print_inputs(Inputs in, int nprocs, int version )
{
	// Estimate of memory usage for this configuration
	int mem_tot = estimate_mem_usage( in );

	logo(version);
	center_print("INPUT SUMMARY", 79);
	border_print();

	if( in.simulation_method == EVENT_BASED )
		printf("Simulation Method:            Event Based\n");
	else
		printf("Simulation Method:            History Based\n");

	switch( in.grid_type )
	{
		case NUCLIDE:
			printf("Grid Type:                    Nuclide Grid\n");
			break;
		case UNIONIZED:
			printf("Grid Type:                    Unionized Grid\n");
			break;
		default:
			printf("Grid Type:                    Hash\n");
			break;
	}

	printf("Materials:                    %d\n", 12);
	printf("H-M Benchmark Size:           %s\n", in.HM);
	printf("Total Nuclides:               %ld\n", in.n_isotopes);
	printf("Gridpoints (per Nuclide):     ");
	fancy_int(in.n_gridpoints);

	if( in.grid_type == HASH )
	{
		printf("Hash Bins:                    ");
		fancy_int(in.hash_bins);
	}
	if( in.grid_type == UNIONIZED )
	{
		printf("Unionized Energy Gridpoints:  ");
		fancy_int(in.n_isotopes*in.n_gridpoints);
	}
	if( in.simulation_method == HISTORY_BASED )
	{
		printf("Particle Histories:           "); fancy_int(in.particles);
		printf("XS Lookups per Particle:      "); fancy_int(in.lookups);
	}
	printf("Total XS Lookups:             "); fancy_int(in.lookups);

#ifdef MPI
	printf("MPI Ranks:                    %d\n", nprocs);
	printf("OMP Threads per MPI Rank:     %d\n", in.nthreads);
	printf("Mem Usage per MPI Rank (MB):  "); fancy_int(mem_tot);
#else
	printf("Threads:                      %d\n", in.nthreads);
	printf("Est. Memory Usage (MB):       "); fancy_int(mem_tot);
#endif

	printf("Binary File Mode:             ");
	switch( in.binary_mode )
	{
		case NONE:
			printf("Off\n");
			break;
		case READ:
			printf("Read\n");
			break;
		default:
			printf("Write\n");
			break;
	}

	border_print();
	center_print("INITIALIZATION - DO NOT PROFILE", 79);
	border_print();
}
// Prints an 80-character '=' separator line.
void border_print(void)
{
	fputs(
		"==================================================================="
		"=============\n", stdout);
}
// Prints comma separated integers - for ease of reading
// Formats a long into buf with comma thousands separators (e.g. 1234567 ->
// "1,234,567"). Extracted from fancy_int so the formatting is unit-testable.
// Values below 1000 (including negatives) are printed as-is, matching the
// original behavior.
static void format_fancy_long( long a, char *buf, size_t cap )
{
	if( a < 1000 )
		snprintf(buf, cap, "%ld", a);
	else if( a < 1000000 )
		snprintf(buf, cap, "%ld,%03ld", a / 1000, a % 1000);
	else if( a < 1000000000 )
		snprintf(buf, cap, "%ld,%03ld,%03ld",
			a / 1000000,
			(a % 1000000) / 1000,
			a % 1000 );
	else
		// NOTE: the original had a trailing `else printf("%ld\n",a);` after
		// this branch, which was unreachable (a < 1000 already covers all
		// remaining values); it has been removed.
		snprintf(buf, cap, "%ld,%03ld,%03ld,%03ld",
			a / 1000000000,
			(a % 1000000000) / 1000000,
			(a % 1000000) / 1000,
			a % 1000 );
}

// Prints comma separated integers - for ease of reading
void fancy_int( long a )
{
	char buf[32];
	format_fancy_long(a, buf, sizeof buf);
	printf("%s\n", buf);
}
// Prints the usage/help text and terminates the program with exit code 4.
// Called whenever command-line parsing or validation fails.
void print_CLI_error(void)
{
	static const char * usage_lines[] = {
		"Usage: ./XSBench <options>\n",
		"Options include:\n",
		"  -m <simulation method>   Simulation method (history, event)\n",
		"  -t <threads>             Number of OpenMP threads to run\n",
		"  -s <size>                Size of H-M Benchmark to run (small, large, XL, XXL)\n",
		"  -g <gridpoints>          Number of gridpoints per nuclide (overrides -s defaults)\n",
		"  -G <grid type>           Grid search type (unionized, nuclide, hash). Defaults to unionized.\n",
		"  -p <particles>           Number of particle histories\n",
		"  -l <lookups>             History Based: Number of Cross-section (XS) lookups per particle. Event Based: Total number of XS lookups.\n",
		"  -h <hash bins>           Number of hash bins (only relevant when used with \"-G hash\")\n",
		"  -b <binary mode>         Read or write all data structures to file. If reading, this will skip initialization phase. (read, write)\n",
		"  -k <kernel ID>           Specifies which kernel to run. 0 is baseline, 1, 2, etc are optimized variants. (0 is default.)\n",
		"Default is equivalent to: -m history -s large -l 34 -p 500000 -G unionized\n",
		"See readme for full description of default run values\n",
	};
	for( size_t k = 0; k < sizeof(usage_lines) / sizeof(usage_lines[0]); k++ )
		fputs(usage_lines[k], stdout);
	exit(4);
}
// Parses the command line into an Inputs structure, applying defaults,
// validating every field, and exiting via print_CLI_error() on any bad value.
// Byte-for-byte template: note the autotuning placeholder below.
Inputs read_CLI( int argc, char * argv[] )
{
Inputs input;
// defaults to the history based simulation method
input.simulation_method = HISTORY_BASED;
// defaults to max threads on the system
#ifdef OPENMP
//input.nthreads = omp_get_num_procs();
// NOTE: "#P0" is a tuning-template placeholder, not valid C -- it is
// substituted with a concrete thread count before compilation. Do not "fix".
input.nthreads = #P0;
#else
input.nthreads = 1;
#endif
// defaults to 355 (corresponding to H-M Large benchmark)
input.n_isotopes = 355;
// defaults to 11303 (corresponding to H-M Large benchmark)
input.n_gridpoints = 11303;
// defaults to 500,000
input.particles = 500000;
// defaults to 34
input.lookups = 34;
// default to unionized grid
input.grid_type = UNIONIZED;
// default to unionized grid
input.hash_bins = 10000;
// default to no binary read/write
input.binary_mode = NONE;
// defaults to baseline kernel
input.kernel_id = 0;
// defaults to H-M Large benchmark
input.HM = (char *) malloc( 6 * sizeof(char) );
input.HM[0] = 'l' ;
input.HM[1] = 'a' ;
input.HM[2] = 'r' ;
input.HM[3] = 'g' ;
input.HM[4] = 'e' ;
input.HM[5] = '\0';
// Check if user sets these
int user_g = 0;
int default_lookups = 1;
int default_particles = 1;
// Collect Raw Input
for( int i = 1; i < argc; i++ )
{
char * arg = argv[i];
// nthreads (-t)
if( strcmp(arg, "-t") == 0 )
{
if( ++i < argc )
input.nthreads = atoi(argv[i]);
else
print_CLI_error();
}
// n_gridpoints (-g)
else if( strcmp(arg, "-g") == 0 )
{
if( ++i < argc )
{
user_g = 1;
input.n_gridpoints = atol(argv[i]);
}
else
print_CLI_error();
}
// Simulation Method (-m)
else if( strcmp(arg, "-m") == 0 )
{
char * sim_type;
if( ++i < argc )
sim_type = argv[i];
else
print_CLI_error();
if( strcmp(sim_type, "history") == 0 )
input.simulation_method = HISTORY_BASED;
else if( strcmp(sim_type, "event") == 0 )
{
input.simulation_method = EVENT_BASED;
// Also resets default # of lookups
// (event mode interprets -l as a TOTAL lookup count, so the history
// default of lookups-per-particle is folded into one total here --
// but only if the user overrode neither -l nor -p)
if( default_lookups && default_particles )
{
input.lookups = input.lookups * input.particles;
input.particles = 0;
}
}
else
print_CLI_error();
}
// lookups (-l)
else if( strcmp(arg, "-l") == 0 )
{
if( ++i < argc )
{
input.lookups = atoi(argv[i]);
default_lookups = 0;
}
else
print_CLI_error();
}
// hash bins (-h)
else if( strcmp(arg, "-h") == 0 )
{
if( ++i < argc )
input.hash_bins = atoi(argv[i]);
else
print_CLI_error();
}
// particles (-p)
else if( strcmp(arg, "-p") == 0 )
{
if( ++i < argc )
{
input.particles = atoi(argv[i]);
default_particles = 0;
}
else
print_CLI_error();
}
// HM (-s)
else if( strcmp(arg, "-s") == 0 )
{
// NOTE(review): this aliases input.HM to argv storage and drops the
// malloc'd default without freeing it (one-time leak) -- confirm intended.
if( ++i < argc )
input.HM = argv[i];
else
print_CLI_error();
}
// grid type (-G)
else if( strcmp(arg, "-G") == 0 )
{
char * grid_type;
if( ++i < argc )
grid_type = argv[i];
else
print_CLI_error();
if( strcmp(grid_type, "unionized") == 0 )
input.grid_type = UNIONIZED;
else if( strcmp(grid_type, "nuclide") == 0 )
input.grid_type = NUCLIDE;
else if( strcmp(grid_type, "hash") == 0 )
input.grid_type = HASH;
else
print_CLI_error();
}
// binary mode (-b)
else if( strcmp(arg, "-b") == 0 )
{
char * binary_mode;
if( ++i < argc )
binary_mode = argv[i];
else
print_CLI_error();
if( strcmp(binary_mode, "read") == 0 )
input.binary_mode = READ;
else if( strcmp(binary_mode, "write") == 0 )
input.binary_mode = WRITE;
else
print_CLI_error();
}
// kernel optimization selection (-k)
else if( strcmp(arg, "-k") == 0 )
{
if( ++i < argc )
{
input.kernel_id = atoi(argv[i]);
}
else
print_CLI_error();
}
else
print_CLI_error();
}
// Validate Input
// Validate nthreads
if( input.nthreads < 1 )
print_CLI_error();
// Validate n_isotopes
if( input.n_isotopes < 1 )
print_CLI_error();
// Validate n_gridpoints
if( input.n_gridpoints < 1 )
print_CLI_error();
// Validate lookups
if( input.lookups < 1 )
print_CLI_error();
// Validate Hash Bins
if( input.hash_bins < 1 )
print_CLI_error();
// Validate HM size
if( strcasecmp(input.HM, "small") != 0 &&
strcasecmp(input.HM, "large") != 0 &&
strcasecmp(input.HM, "XL") != 0 &&
strcasecmp(input.HM, "XXL") != 0 )
print_CLI_error();
// Set HM size specific parameters
// (defaults to large)
if( strcasecmp(input.HM, "small") == 0 )
input.n_isotopes = 68;
else if( strcasecmp(input.HM, "XL") == 0 && user_g == 0 )
input.n_gridpoints = 238847; // sized to make 120 GB XS data
else if( strcasecmp(input.HM, "XXL") == 0 && user_g == 0 )
input.n_gridpoints = 238847 * 2.1; // 252 GB XS data
// Return input struct
return input;
}
// Serializes the full SimulationData object (header struct + all heap arrays)
// to "XS_data.dat" so a later run started with "-b read" can skip
// initialization entirely. The embedded pointers are written too but are
// ignored (and re-allocated) on read.
void binary_write( Inputs in, SimulationData SD )
{
	char * fname = "XS_data.dat";
	printf("Writing all data structures to binary file %s...\n", fname);
	// BUGFIX: open in binary mode ("wb"); text mode ("w") corrupts the data
	// on platforms that perform newline translation (e.g. Windows).
	FILE * fp = fopen(fname, "wb");
	assert(fp != NULL); // mirror the fopen check already done in binary_read()
	// Write SimulationData Object. Include pointers, even though we won't be using them.
	fwrite(&SD, sizeof(SimulationData), 1, fp);
	// Write heap arrays in SimulationData Object
	fwrite(SD.num_nucs,                sizeof(int),              SD.length_num_nucs,                fp);
	fwrite(SD.concs,                   sizeof(double),           SD.length_concs,                   fp);
	fwrite(SD.mats,                    sizeof(int),              SD.length_mats,                    fp);
	fwrite(SD.nuclide_grid,            sizeof(NuclideGridPoint), SD.length_nuclide_grid,            fp);
	fwrite(SD.index_grid,              sizeof(int),              SD.length_index_grid,              fp);
	fwrite(SD.unionized_energy_array,  sizeof(double),           SD.length_unionized_energy_array,  fp);
	fclose(fp);
}
// Loads the full SimulationData object from "XS_data.dat" (as produced by
// binary_write): reads the header struct, re-allocates each heap array from
// the recorded lengths, then reads the array contents. Aborts (assert) if the
// file is missing or truncated.
SimulationData binary_read( Inputs in )
{
	SimulationData SD;

	char * fname = "XS_data.dat";
	printf("Reading all data structures from binary file %s...\n", fname);

	// BUGFIX: open in binary mode ("rb") to match binary data; text mode
	// silently corrupts reads on newline-translating platforms.
	FILE * fp = fopen(fname, "rb");
	assert(fp != NULL);

	// Read SimulationData Object. Include pointers, even though we won't be using them.
	size_t nread = fread(&SD, sizeof(SimulationData), 1, fp);
	assert(nread == 1);

	// Allocate space for arrays on heap
	SD.num_nucs               = (int *)              malloc(SD.length_num_nucs * sizeof(int));
	SD.concs                  = (double *)           malloc(SD.length_concs * sizeof(double));
	SD.mats                   = (int *)              malloc(SD.length_mats * sizeof(int));
	SD.nuclide_grid           = (NuclideGridPoint *) malloc(SD.length_nuclide_grid * sizeof(NuclideGridPoint));
	SD.index_grid             = (int *)              malloc( SD.length_index_grid * sizeof(int));
	SD.unionized_energy_array = (double *)           malloc( SD.length_unionized_energy_array * sizeof(double));

	// Read heap arrays into SimulationData Object, verifying each count
	// (the original discarded fread's return value, hiding truncated files).
	assert(fread(SD.num_nucs,               sizeof(int),              SD.length_num_nucs,               fp) == (size_t) SD.length_num_nucs);
	assert(fread(SD.concs,                  sizeof(double),           SD.length_concs,                  fp) == (size_t) SD.length_concs);
	assert(fread(SD.mats,                   sizeof(int),              SD.length_mats,                   fp) == (size_t) SD.length_mats);
	assert(fread(SD.nuclide_grid,           sizeof(NuclideGridPoint), SD.length_nuclide_grid,           fp) == (size_t) SD.length_nuclide_grid);
	assert(fread(SD.index_grid,             sizeof(int),              SD.length_index_grid,             fp) == (size_t) SD.length_index_grid);
	assert(fread(SD.unionized_energy_array, sizeof(double),           SD.length_unionized_energy_array, fp) == (size_t) SD.length_unionized_energy_array);

	fclose(fp);

	return SD;
}
//Simulation.c
////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// implementation of the algorithm, with only minor CPU optimizations in place.
// Following these functions are a number of optimized variants,
// which each deploy a different combination of optimizations strategies. By
// default, XSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
// Baseline event-based simulation kernel.
//
// Every one of the in.lookups iterations is an independent "event": the RNG
// seed is fast-forwarded to that iteration's private position in the stream
// (2 samples per lookup), a particle energy and material are sampled, and a
// single macroscopic XS lookup is performed. Each lookup's argmax reaction
// channel (+1) is folded into the returned verification checksum. Only MPI
// rank 0 (mype == 0) prints.
//
// NOTE(review): "#P1" in the schedule clause below is a template placeholder
// for the OpenMP chunk size; it is substituted before compilation.
unsigned long long run_event_based_simulation(Inputs in, SimulationData SD, int mype)
{
    if( mype == 0)
        printf("Beginning event based simulation...\n");

    ////////////////////////////////////////////////////////////////////////////////
    // SUMMARY: Simulation Data Structure Manifest for "SD" Object
    // Here we list all heap arrays (and lengths) in SD that would need to be
    // offloaded manually if using an accelerator with a seperate memory space
    ////////////////////////////////////////////////////////////////////////////////
    // int * num_nucs;                  // Length = length_num_nucs;
    // double * concs;                  // Length = length_concs
    // int * mats;                      // Length = length_mats
    // double * unionized_energy_array; // Length = length_unionized_energy_array
    // int * index_grid;                // Length = length_index_grid
    // NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid
    //
    // Note: "unionized_energy_array" and "index_grid" can be of zero length
    // depending on lookup method.
    //
    // Note: "Lengths" are given as the number of objects in the array, not the
    // number of bytes.
    ////////////////////////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////////////////////////
    // Begin Actual Simulation Loop
    ////////////////////////////////////////////////////////////////////////////////
    unsigned long long verification = 0;
    // Iterations are fully independent: each reconstructs its own RNG state.
    #pragma omp parallel for schedule(dynamic,#P1) reduction(+:verification)
    for( int i = 0; i < in.lookups; i++ )
    {
        // Set the initial seed value
        uint64_t seed = STARTING_SEED;

        // Forward seed to lookup index (we need 2 samples per lookup)
        seed = fast_forward_LCG(seed, 2*i);

        // Randomly pick an energy and material for the particle
        double p_energy = LCG_random_double(&seed);
        int mat = pick_mat(&seed);

        double macro_xs_vector[5] = {0};

        // Perform macroscopic Cross Section Lookup
        calculate_macro_xs(
            p_energy, // Sampled neutron energy (in lethargy)
            mat, // Sampled material type index neutron is in
            in.n_isotopes, // Total number of isotopes in simulation
            in.n_gridpoints, // Number of gridpoints per isotope in simulation
            SD.num_nucs, // 1-D array with number of nuclides per material
            SD.concs, // Flattened 2-D array with concentration of each nuclide in each material
            SD.unionized_energy_array, // 1-D Unionized energy array
            SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
            SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
            SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material
            macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
            in.grid_type, // Lookup type (nuclide, hash, or unionized)
            in.hash_bins, // Number of hash bins used (if using hash lookup type)
            SD.max_num_nucs // Maximum number of nuclides present in any material
        );

        // For verification, and to prevent the compiler from optimizing
        // all work out, we interrogate the returned macro_xs_vector array
        // to find its maximum value index, then increment the verification
        // value by that index. In this implementation, we prevent thread
        // contention by using an OMP reduction on the verification value.
        // For accelerators, a different approach might be required
        // (e.g., atomics, reduction of thread-specific values in large
        // array via CUDA thrust, etc).
        double max = -1.0;
        int max_idx = 0;
        for(int j = 0; j < 5; j++ )
        {
            if( macro_xs_vector[j] > max )
            {
                max = macro_xs_vector[j];
                max_idx = j;
            }
        }
        verification += max_idx+1;
    }
    return verification;
}
// Baseline history-based simulation kernel.
//
// The outer loop over in.particles is independent and parallelized; the inner
// loop of in.lookups XS lookups per particle is deliberately SEQUENTIAL: the
// next iteration's energy/material sample depends on the previous lookup's
// result (via a data-dependent RNG fast-forward), mimicking how branching
// physics creates loop-carried dependence in a real MC transport app.
// Returns the verification checksum; only rank 0 (mype == 0) prints.
//
// NOTE(review): "#P1" in the schedule clause below is a template placeholder
// for the OpenMP chunk size; it is substituted before compilation.
unsigned long long run_history_based_simulation(Inputs in, SimulationData SD, int mype)
{
    if( mype == 0)
        printf("Beginning history based simulation...\n");

    ////////////////////////////////////////////////////////////////////////////////
    // SUMMARY: Simulation Data Structure Manifest for "SD" Object
    // Here we list all heap arrays (and lengths) in SD that would need to be
    // offloaded manually if using an accelerator with a seperate memory space
    ////////////////////////////////////////////////////////////////////////////////
    // int * num_nucs;                  // Length = length_num_nucs;
    // double * concs;                  // Length = length_concs
    // int * mats;                      // Length = length_mats
    // double * unionized_energy_array; // Length = length_unionized_energy_array
    // int * index_grid;                // Length = length_index_grid
    // NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid
    //
    // Note: "unionized_energy_array" and "index_grid" can be of zero length
    // depending on lookup method.
    //
    // Note: "Lengths" are given as the number of objects in the array, not the
    // number of bytes.
    ////////////////////////////////////////////////////////////////////////////////
    unsigned long long verification = 0;

    // Begin outer lookup loop over particles. This loop is independent.
    #pragma omp parallel for schedule(dynamic, #P1) reduction(+:verification)
    for( int p = 0; p < in.particles; p++ )
    {
        // Set the initial seed value
        uint64_t seed = STARTING_SEED;

        // Forward seed to lookup index (we need 2 samples per lookup, and
        // we may fast forward up to 5 times after each lookup)
        seed = fast_forward_LCG(seed, p*in.lookups*2*5);

        // Randomly pick an energy and material for the particle
        double p_energy = LCG_random_double(&seed);
        int mat = pick_mat(&seed);

        // Inner XS Lookup Loop
        // This loop is dependent!
        // i.e., Next iteration uses data computed in previous iter.
        for( int i = 0; i < in.lookups; i++ )
        {
            double macro_xs_vector[5] = {0};

            // Perform macroscopic Cross Section Lookup
            calculate_macro_xs(
                p_energy, // Sampled neutron energy (in lethargy)
                mat, // Sampled material type neutron is in
                in.n_isotopes, // Total number of isotopes in simulation
                in.n_gridpoints, // Number of gridpoints per isotope in simulation
                SD.num_nucs, // 1-D array with number of nuclides per material
                SD.concs, // Flattened 2-D array with concentration of each nuclide in each material
                SD.unionized_energy_array, // 1-D Unionized energy array
                SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
                SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
                SD.mats, // Flattened 2-D array with nuclide indices for each type of material
                macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
                in.grid_type, // Lookup type (nuclide, hash, or unionized)
                in.hash_bins, // Number of hash bins used (if using hash lookups)
                SD.max_num_nucs // Maximum number of nuclides present in any material
            );

            // For verification, and to prevent the compiler from optimizing
            // all work out, we interrogate the returned macro_xs_vector array
            // to find its maximum value index, then increment the verification
            // value by that index. In this implementation, we prevent thread
            // contention by using an OMP reduction on it. For other accelerators,
            // a different approach might be required (e.g., atomics, reduction
            // of thread-specific values in large array via CUDA thrust, etc)
            double max = -1.0;
            int max_idx = 0;
            for(int j = 0; j < 5; j++ )
            {
                if( macro_xs_vector[j] > max )
                {
                    max = macro_xs_vector[j];
                    max_idx = j;
                }
            }
            verification += max_idx+1;

            // Randomly pick next energy and material for the particle
            // Also incorporates results from macro_xs lookup to
            // enforce loop dependency.
            // In a real MC app, this dependency is expressed in terms
            // of branching physics sampling, whereas here we are just
            // artificially enforcing this dependence based on fast
            // forwarding the LCG state
            uint64_t n_forward = 0;
            for( int j = 0; j < 5; j++ )
                if( macro_xs_vector[j] > 1.0 )
                    n_forward++;
            if( n_forward > 0 )
                seed = fast_forward_LCG(seed, n_forward);

            p_energy = LCG_random_double(&seed);
            mat = pick_mat(&seed);
        }
    }
    return verification;
}
// Calculates the microscopic cross section for a given nuclide & energy
// Calculates the microscopic cross section for a given nuclide & energy.
//
// Locates the two grid points bracketing p_energy on nuclide "nuc"'s energy
// grid, then linearly interpolates all 5 reaction channels into xs_vector
// (order: total, elastic, absorption, fission, nu-fission).
//
// The meaning of "idx" depends on grid_type:
//   NUCLIDE   - ignored; a fresh binary search over the nuclide's own grid
//               is performed here.
//   UNIONIZED - index into the unionized energy grid; index_data maps
//               (unionized level, nuclide) -> lower bounding nuclide index.
//   HASH      - hash bin index; index_data gives per-nuclide lower bounds
//               for the bin, narrowing the binary search window.
// In each mode, an index equal to n_gridpoints-1 is stepped back by one so
// that "high = low + 1" never reads past the end of the nuclide's grid.
void calculate_micro_xs( double p_energy, int nuc, long n_isotopes,
    long n_gridpoints,
    double * restrict egrid, int * restrict index_data,
    NuclideGridPoint * restrict nuclide_grids,
    long idx, double * restrict xs_vector, int grid_type, int hash_bins ){
    // Variables
    double f;
    NuclideGridPoint * low, * high;

    // If using only the nuclide grid, we must perform a binary search
    // to find the energy location in this particular nuclide's grid.
    if( grid_type == NUCLIDE )
    {
        // Perform binary search on the Nuclide Grid to find the index
        idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1);

        // pull ptr from nuclide grid and check to ensure that
        // we're not reading off the end of the nuclide's grid
        if( idx == n_gridpoints - 1 )
            low = &nuclide_grids[nuc*n_gridpoints + idx - 1];
        else
            low = &nuclide_grids[nuc*n_gridpoints + idx];
    }
    else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed.
    {
        // pull ptr from energy grid and check to ensure that
        // we're not reading off the end of the nuclide's grid
        if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 )
            low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1];
        else
            low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]];
    }
    else // Hash grid
    {
        // load lower bounding index
        int u_low = index_data[idx * n_isotopes + nuc];

        // Determine higher bounding index
        int u_high;
        if( idx == hash_bins - 1 )
            u_high = n_gridpoints - 1;
        else
            u_high = index_data[(idx+1)*n_isotopes + nuc] + 1;

        // Check edge cases to make sure energy is actually between these
        // Then, if things look good, search for gridpoint in the nuclide grid
        // within the lower and higher limits we've calculated.
        double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy;
        double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy;
        int lower;
        if( p_energy <= e_low )
            lower = 0;
        else if( p_energy >= e_high )
            lower = n_gridpoints - 1;
        else
            lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high);

        if( lower == n_gridpoints - 1 )
            low = &nuclide_grids[nuc*n_gridpoints + lower - 1];
        else
            low = &nuclide_grids[nuc*n_gridpoints + lower];
    }

    high = low + 1;

    // calculate the re-useable interpolation factor
    f = (high->energy - p_energy) / (high->energy - low->energy);

    // Total XS
    xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs);

    // Elastic XS
    xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs);

    // Absorbtion XS ("absorbtion" spelling matches the NuclideGridPoint field
    // declared elsewhere in the project)
    xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs);

    // Fission XS
    xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs);

    // Nu Fission XS
    xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs);
}
// Calculates macroscopic cross section based on a given material & energy
// Calculates the macroscopic cross section for a given material & energy.
//
// Sums the concentration-weighted microscopic XS of every nuclide present in
// material "mat" into macro_xs_vector (5 reaction channels). For the
// unionized grid a single binary search locates the shared energy index; for
// the hash grid a simple bin computation suffices; for the per-nuclide grid
// the search is deferred to calculate_micro_xs.
void calculate_macro_xs( double p_energy, int mat, long n_isotopes,
    long n_gridpoints, int * restrict num_nucs,
    double * restrict concs,
    double * restrict egrid, int * restrict index_data,
    NuclideGridPoint * restrict nuclide_grids,
    int * restrict mats,
    double * restrict macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){
    long idx = -1;

    // Start from a zeroed accumulator.
    for( int k = 0; k < 5; k++ )
        macro_xs_vector[k] = 0;

    // Resolve the shared grid index once, when the grid type supports it.
    if( grid_type == UNIONIZED )
        idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid);
    else if( grid_type == HASH )
    {
        // Bin width is 1/hash_bins; divide rather than multiply so the
        // floating-point result matches the reference implementation bit-for-bit.
        double du = 1.0 / hash_bins;
        idx = p_energy / du;
    }

    // Accumulate over each nuclide composing this material. (Independent --
    // though if parallelizing, access to macro_xs_vector must be controlled
    // to avoid simultaneous writes.)
    const int n_nucs = num_nucs[mat];
    for( int j = 0; j < n_nucs; j++ )
    {
        double micro_xs[5];
        int nuc = mats[mat*max_num_nucs + j]; // nuclide index
        double density = concs[mat*max_num_nucs + j]; // its concentration in "mat"

        calculate_micro_xs( p_energy, nuc, n_isotopes,
            n_gridpoints, egrid, index_data,
            nuclide_grids, idx, micro_xs, grid_type, hash_bins );

        for( int k = 0; k < 5; k++ )
            macro_xs_vector[k] += micro_xs[k] * density;
    }
}
// binary search for energy on unionized energy grid
// returns lower index
// Binary search for an energy on the (sorted, ascending) unionized energy
// grid A of length n. Returns the LOWER bounding index, i.e. the largest i
// in [0, n-2] with A[i] <= quarry (clamped to 0 for quarry below A[1]).
long grid_search( long n, double quarry, double * restrict A)
{
    long lo = 0;
    long hi = n - 1;

    // Narrow the bracket [lo, hi] until only two adjacent indices remain.
    while( hi - lo > 1 )
    {
        long mid = lo + (hi - lo) / 2;
        if( quarry < A[mid] )
            hi = mid;
        else
            lo = mid;
    }
    return lo;
}
// binary search for energy on nuclide energy grid
// Binary search for an energy on a single nuclide's (sorted) grid A,
// restricted to the index window [low, high]. Returns the lower bounding
// index of the bracketing pair. ("n" is retained for interface parity with
// grid_search but is not consulted here.)
long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high)
{
    long lo = low;
    long hi = high;

    // Bisect until the bracket collapses to two adjacent grid points.
    while( hi - lo > 1 )
    {
        long mid = lo + (hi - lo) / 2;
        if( quarry < A[mid].energy )
            hi = mid;
        else
            lo = mid;
    }
    return lo;
}
// picks a material based on a probabilistic distribution
// Picks a material based on a probabilistic distribution.
//
// The weights approximate the fractions (by volume) of each material in a
// reactor core. A single LCG sample ("roll") is compared against a running
// cumulative sum. As in the reference implementation, the cumulative sum
// deliberately starts at dist[1], so material 0 (fuel) is the fall-through
// pick when the roll exceeds every threshold.
//
// Improvement over the original: the cumulative threshold for index i was
// recomputed from scratch each iteration (O(n^2) additions); it is now
// accumulated incrementally in O(n). The thresholds are the same sums of the
// same terms (only floating-point summation order differs, at last-ulp
// precision).
int pick_mat( uint64_t * seed )
{
    // I have a nice spreadsheet supporting these numbers. They are
    // the fractions (by volume) of material in the core. Not a
    // *perfect* approximation of where XS lookups are going to occur,
    // but this will do a good job of biasing the system nonetheless.
    double dist[12];
    dist[0] = 0.140; // fuel
    dist[1] = 0.052; // cladding
    dist[2] = 0.275; // cold, borated water
    dist[3] = 0.134; // hot, borated water
    dist[4] = 0.154; // RPV
    dist[5] = 0.064; // Lower, radial reflector
    dist[6] = 0.066; // Upper reflector / top plate
    dist[7] = 0.055; // bottom plate
    dist[8] = 0.008; // bottom nozzle
    dist[9] = 0.015; // top nozzle
    dist[10] = 0.025; // top of fuel assemblies
    dist[11] = 0.013; // bottom of fuel assemblies

    double roll = LCG_random_double(seed);

    // makes a pick based on the distro (single cumulative pass; dist[0] is
    // intentionally excluded from the thresholds, matching the original)
    double running = 0;
    for( int i = 1; i < 12; i++ )
    {
        running += dist[i];
        if( roll < running )
            return i;
    }
    return 0;
}
// Advances *seed one step of a 63-bit linear congruential generator and
// returns the new state scaled into [0, 1).
double LCG_random_double(uint64_t * seed)
{
    // LCG parameters: modulus 2^63, with multiplier/increment chosen for
    // full period over the 63-bit state space.
    const uint64_t modulus    = 9223372036854775808ULL; // 2^63
    const uint64_t multiplier = 2806196910506780709ULL;
    const uint64_t increment  = 1ULL;

    const uint64_t next = (multiplier * (*seed) + increment) % modulus;
    *seed = next;
    return (double) next / (double) modulus;
}
// Returns the LCG state reached after advancing "seed" by n steps, in
// O(log n) time rather than n sequential steps. Uses the standard skip-ahead
// identity: n applications of x -> a*x + c compose into one affine map
// x -> a_acc*x + c_acc, built up by repeated squaring of (a, c).
uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
    // LCG parameters (must match LCG_random_double)
    const uint64_t m = 9223372036854775808ULL; // 2^63
    uint64_t a = 2806196910506780709ULL;
    uint64_t c = 1ULL;

    uint64_t a_acc = 1; // accumulated multiplier
    uint64_t c_acc = 0; // accumulated increment

    // Binary decomposition of the stride; intermediate products are taken
    // mod 2^64 by unsigned wraparound, with a final reduction mod 2^63.
    for( n = n % m; n > 0; n >>= 1 )
    {
        if( n & 1ULL )
        {
            a_acc *= a;
            c_acc = c_acc * a + c;
        }
        c *= a + 1;
        a *= a;
    }
    return (a_acc * seed + c_acc) % m;
}
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
// functions, which each deploy a different combination of optimizations strategies.
// By default, XSBench will not run any of these variants. They
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
//
// As fast parallel sorting will be required for these optimizations, we will
// first define a set of key-value parallel quicksort routines.
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Parallel Quicksort Key-Value Sorting Algorithms
////////////////////////////////////////////////////////////////////////////////////
//
// These algorithms are based on the parallel quicksort implementation by
// Eduard Lopez published at https://github.com/eduardlopez/quicksort-parallel
//
// Eduard's original version was for an integer type quicksort, but I have modified
// it to form two different versions that can sort key-value pairs together without
// having to bundle them into a separate object. Additionally, I have modified the
// optimal chunk sizes and restricted the number of threads for the array sizing
// that XSBench will be using by default.
//
// Eduard's original implementation carries the following license, which applies to
// the following functions only:
//
// void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff)
// void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads)
// void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff)
// void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads)
//
// The MIT License (MIT)
//
// Copyright (c) 2016 Eduard López
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
////////////////////////////////////////////////////////////////////////////////////
// Recursive quicksort on an int key array with a double value array permuted
// in lockstep. Partitions [left, right] about the middle element's key; small
// partitions recurse serially, partitions wider than "cutoff" are handed to
// OpenMP tasks (spawned from the parallel/single region in the caller).
void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff)
{
    int lo = left;
    int hi = right;
    const int pivot = key[(left + right) / 2];

    // Hoare-style partition: swap key/value pairs so everything <= pivot
    // lands left of everything >= pivot.
    while( lo <= hi )
    {
        while( key[lo] < pivot ) lo++;
        while( key[hi] > pivot ) hi--;
        if( lo <= hi )
        {
            int k_tmp = key[lo];
            key[lo] = key[hi];
            key[hi] = k_tmp;
            double v_tmp = value[lo];
            value[lo] = value[hi];
            value[hi] = v_tmp;
            lo++;
            hi--;
        }
    }

    // Serial recursion for small partitions; tasks for large ones.
    if( right - left < cutoff )
    {
        if( left < hi ) quickSort_parallel_internal_i_d(key, value, left, hi, cutoff);
        if( lo < right ) quickSort_parallel_internal_i_d(key, value, lo, right, cutoff);
    }
    else
    {
        #pragma omp task
        quickSort_parallel_internal_i_d(key, value, left, hi, cutoff);
        #pragma omp task
        quickSort_parallel_internal_i_d(key, value, lo, right, cutoff);
    }
}
// Entry point for the key=int / value=double parallel quicksort: sorts
// key[0..lenArray-1] ascending, permuting value identically. A single thread
// seeds the recursion inside a parallel region; OpenMP tasks fan the work out.
void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads){
    // Minimum partition size worth spawning tasks for.
    const int cutoff = 10000;

    // For this problem size, more than 16 CPU threads is not helpful.
    int threads = numThreads;
    if( threads > 16 )
        threads = 16;

    #pragma omp parallel num_threads(threads)
    {
        #pragma omp single nowait
        quickSort_parallel_internal_i_d(key, value, 0, lenArray-1, cutoff);
    }
}
// Recursive quicksort on a double key array with an int value array permuted
// in lockstep (mirror of the i_d variant). Partitions [left, right] about the
// middle element's key; partitions wider than "cutoff" become OpenMP tasks.
void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff)
{
    int lo = left;
    int hi = right;
    const double pivot = key[(left + right) / 2];

    // Hoare-style partition about the pivot key.
    while( lo <= hi )
    {
        while( key[lo] < pivot ) lo++;
        while( key[hi] > pivot ) hi--;
        if( lo <= hi )
        {
            double k_tmp = key[lo];
            key[lo] = key[hi];
            key[hi] = k_tmp;
            int v_tmp = value[lo];
            value[lo] = value[hi];
            value[hi] = v_tmp;
            lo++;
            hi--;
        }
    }

    // Serial recursion for small partitions; tasks for large ones.
    if( right - left < cutoff )
    {
        if( left < hi ) quickSort_parallel_internal_d_i(key, value, left, hi, cutoff);
        if( lo < right ) quickSort_parallel_internal_d_i(key, value, lo, right, cutoff);
    }
    else
    {
        #pragma omp task
        quickSort_parallel_internal_d_i(key, value, left, hi, cutoff);
        #pragma omp task
        quickSort_parallel_internal_d_i(key, value, lo, right, cutoff);
    }
}
// Entry point for the key=double / value=int parallel quicksort: sorts
// key[0..lenArray-1] ascending, permuting value identically. A single thread
// seeds the recursion inside a parallel region; OpenMP tasks fan the work out.
void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads){
    // Minimum partition size worth spawning tasks for.
    const int cutoff = 10000;

    // For this problem size, more than 16 CPU threads is not helpful.
    int threads = numThreads;
    if( threads > 16 )
        threads = 16;

    #pragma omp parallel num_threads(threads)
    {
        #pragma omp single nowait
        quickSort_parallel_internal_d_i(key, value, 0, lenArray-1, cutoff);
    }
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 1 -- Event-based Sample/XS Lookup kernel splitting + Sorting
// lookups by material and energy
////////////////////////////////////////////////////////////////////////////////////
// This kernel separates out the sampling and lookup regions of the event-based
// model, and then sorts the lookups by material type and energy. The goal of this
// optimization is to allow for greatly improved cache locality, and XS indices
// loaded from memory may be re-used for multiple lookups.
//
// As efficienct sorting is key for performance, we also must implement an
// efficient key-value parallel sorting algorithm. We also experimented with using
// the C++ version of thrust for these purposes, but found that our own implemtation
// was slightly faster than the thrust library version, so for speed and
// simplicity we will do not add the thrust dependency.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 1: event-based kernel with sample/lookup splitting plus a full
// sort of all lookups by material and then by energy, for improved cache
// locality during the lookup phase. Returns the same verification checksum as
// the baseline event-based kernel; only rank 0 (mype == 0) prints.
//
// Fixes vs. original: allocation message no longer claims "on GPU" (this path
// allocates host memory), malloc results are checked (matching grid_init's
// assert style), and the sampling buffers are freed before return -- SD is
// passed by value, so the pointers would otherwise leak.
//
// NOTE(review): "#P1" in the schedule clauses is a template placeholder for
// the OpenMP chunk size; it is substituted before compilation.
unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData SD, int mype)
{
    const char * optimization_name = "Optimization 1 - Kernel splitting + full material & energy sort";

    if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);

    ////////////////////////////////////////////////////////////////////////////////
    // Allocate Additional Data Structures Needed by Optimized Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Allocating additional data required by optimized kernel...\n");
    size_t sz;
    size_t total_sz = 0;
    double start, stop;

    sz = in.lookups * sizeof(double);
    SD.p_energy_samples = (double *) malloc(sz);
    assert(SD.p_energy_samples != NULL);
    total_sz += sz;
    SD.length_p_energy_samples = in.lookups;

    sz = in.lookups * sizeof(int);
    SD.mat_samples = (int *) malloc(sz);
    assert(SD.mat_samples != NULL);
    total_sz += sz;
    SD.length_mat_samples = in.lookups;

    if( mype == 0) printf("Allocated an additional %.0lf MB of data.\n", total_sz/1024.0/1024.0);

    ////////////////////////////////////////////////////////////////////////////////
    // Begin Actual Simulation
    ////////////////////////////////////////////////////////////////////////////////

    ////////////////////////////////////////////////////////////////////////////////
    // Sample Materials and Energies
    ////////////////////////////////////////////////////////////////////////////////
    // Same counter-based RNG scheme as the baseline: each lookup reconstructs
    // its private stream position, so sampling parallelizes trivially.
    #pragma omp parallel for schedule(dynamic, #P1)
    for( int i = 0; i < in.lookups; i++ )
    {
        // Set the initial seed value
        uint64_t seed = STARTING_SEED;

        // Forward seed to lookup index (we need 2 samples per lookup)
        seed = fast_forward_LCG(seed, 2*i);

        // Randomly pick an energy and material for the particle
        double p_energy = LCG_random_double(&seed);
        int mat = pick_mat(&seed);

        SD.p_energy_samples[i] = p_energy;
        SD.mat_samples[i] = mat;
    }
    if(mype == 0) printf("finished sampling...\n");

    ////////////////////////////////////////////////////////////////////////////////
    // Sort by Material
    ////////////////////////////////////////////////////////////////////////////////
    start = get_time();
    quickSort_parallel_i_d(SD.mat_samples, SD.p_energy_samples, in.lookups, in.nthreads);
    stop = get_time();
    if(mype == 0) printf("Material sort took %.3lf seconds\n", stop-start);

    ////////////////////////////////////////////////////////////////////////////////
    // Sort by Energy
    ////////////////////////////////////////////////////////////////////////////////
    start = get_time();

    // Count up number of each type of sample.
    int num_samples_per_mat[12] = {0};
    for( int l = 0; l < in.lookups; l++ )
        num_samples_per_mat[ SD.mat_samples[l] ]++;

    // Determine offsets (exclusive prefix sum over per-material counts)
    int offsets[12] = {0};
    for( int m = 1; m < 12; m++ )
        offsets[m] = offsets[m-1] + num_samples_per_mat[m-1];

    stop = get_time();
    if(mype == 0) printf("Counting samples and offsets took %.3lf seconds\n", stop-start);
    start = stop;

    // Sort each material type by energy level
    for( int m = 0; m < 12; m++ )
        quickSort_parallel_d_i(SD.p_energy_samples + offsets[m],SD.mat_samples + offsets[m], num_samples_per_mat[m], in.nthreads);

    stop = get_time();
    if(mype == 0) printf("Energy Sorts took %.3lf seconds\n", stop-start);

    ////////////////////////////////////////////////////////////////////////////////
    // Perform lookups for each material separately
    ////////////////////////////////////////////////////////////////////////////////
    start = get_time();
    unsigned long long verification = 0;

    // Individual Materials
    int offset = 0;
    for( int m = 0; m < 12; m++ )
    {
        #pragma omp parallel for schedule(dynamic,#P1) reduction(+:verification)
        for( int i = offset; i < offset + num_samples_per_mat[m]; i++)
        {
            // load pre-sampled energy and material for the particle
            double p_energy = SD.p_energy_samples[i];
            int mat = SD.mat_samples[i];

            double macro_xs_vector[5] = {0};

            // Perform macroscopic Cross Section Lookup
            calculate_macro_xs(
                p_energy, // Sampled neutron energy (in lethargy)
                mat, // Sampled material type index neutron is in
                in.n_isotopes, // Total number of isotopes in simulation
                in.n_gridpoints, // Number of gridpoints per isotope in simulation
                SD.num_nucs, // 1-D array with number of nuclides per material
                SD.concs, // Flattened 2-D array with concentration of each nuclide in each material
                SD.unionized_energy_array, // 1-D Unionized energy array
                SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
                SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
                SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material
                macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
                in.grid_type, // Lookup type (nuclide, hash, or unionized)
                in.hash_bins, // Number of hash bins used (if using hash lookup type)
                SD.max_num_nucs // Maximum number of nuclides present in any material
            );

            // For verification, and to prevent the compiler from optimizing
            // all work out, we interrogate the returned macro_xs_vector array
            // to find its maximum value index, then increment the verification
            // value by that index. In this implementation, we prevent thread
            // contention by using an OMP reduction on the verification value.
            // For accelerators, a different approach might be required
            // (e.g., atomics, reduction of thread-specific values in large
            // array via CUDA thrust, etc).
            double max = -1.0;
            int max_idx = 0;
            for(int j = 0; j < 5; j++ )
            {
                if( macro_xs_vector[j] > max )
                {
                    max = macro_xs_vector[j];
                    max_idx = j;
                }
            }
            verification += max_idx+1;
        }
        offset += num_samples_per_mat[m];
    }
    stop = get_time();
    if(mype == 0) printf("XS Lookups took %.3lf seconds\n", stop-start);

    // Release the sampling buffers: SD is a by-value copy, so these
    // allocations are unreachable after return and would otherwise leak.
    free(SD.p_energy_samples);
    free(SD.mat_samples);

    return verification;
}
//GridInit.c
SimulationData grid_init_do_not_profile( Inputs in, int mype )
{
// Structure to hold all allocated simuluation data arrays
SimulationData SD;
// Keep track of how much data we're allocating
size_t nbytes = 0;
// Set the initial seed value
uint64_t seed = 42;
////////////////////////////////////////////////////////////////////
// Initialize Nuclide Grids
////////////////////////////////////////////////////////////////////
if(mype == 0) printf("Intializing nuclide grids...\n");
// First, we need to initialize our nuclide grid. This comes in the form
// of a flattened 2D array that hold all the information we need to define
// the cross sections for all isotopes in the simulation.
// The grid is composed of "NuclideGridPoint" structures, which hold the
// energy level of the grid point and all associated XS data at that level.
// An array of structures (AOS) is used instead of
// a structure of arrays, as the grid points themselves are accessed in
// a random order, but all cross section interaction channels and the
// energy level are read whenever the gridpoint is accessed, meaning the
// AOS is more cache efficient.
// Initialize Nuclide Grid
SD.length_nuclide_grid = in.n_isotopes * in.n_gridpoints;
SD.nuclide_grid = (NuclideGridPoint *) malloc( SD.length_nuclide_grid * sizeof(NuclideGridPoint));
assert(SD.nuclide_grid != NULL);
nbytes += SD.length_nuclide_grid * sizeof(NuclideGridPoint);
for( int i = 0; i < SD.length_nuclide_grid; i++ )
{
SD.nuclide_grid[i].energy = LCG_random_double(&seed);
SD.nuclide_grid[i].total_xs = LCG_random_double(&seed);
SD.nuclide_grid[i].elastic_xs = LCG_random_double(&seed);
SD.nuclide_grid[i].absorbtion_xs = LCG_random_double(&seed);
SD.nuclide_grid[i].fission_xs = LCG_random_double(&seed);
SD.nuclide_grid[i].nu_fission_xs = LCG_random_double(&seed);
}
// Sort so that each nuclide has data stored in ascending energy order.
#P2
for( int i = 0; i < in.n_isotopes; i++ )
qsort( &SD.nuclide_grid[i*in.n_gridpoints], in.n_gridpoints, sizeof(NuclideGridPoint), NGP_compare);
// error debug check
/*
#P2
for( int i = 0; i < in.n_isotopes; i++ )
{
printf("NUCLIDE %d ==============================\n", i);
for( int j = 0; j < in.n_gridpoints; j++ )
printf("E%d = %lf\n", j, SD.nuclide_grid[i * in.n_gridpoints + j].energy);
}
*/
////////////////////////////////////////////////////////////////////
// Initialize Acceleration Structure
////////////////////////////////////////////////////////////////////
if( in.grid_type == NUCLIDE )
{
SD.length_unionized_energy_array = 0;
SD.length_index_grid = 0;
}
if( in.grid_type == UNIONIZED )
{
if(mype == 0) printf("Intializing unionized grid...\n");
// Allocate space to hold the union of all nuclide energy data
SD.length_unionized_energy_array = in.n_isotopes * in.n_gridpoints;
SD.unionized_energy_array = (double *) malloc( SD.length_unionized_energy_array * sizeof(double));
assert(SD.unionized_energy_array != NULL );
nbytes += SD.length_unionized_energy_array * sizeof(double);
// Copy energy data over from the nuclide energy grid
#P2
for( int i = 0; i < SD.length_unionized_energy_array; i++ )
SD.unionized_energy_array[i] = SD.nuclide_grid[i].energy;
// Sort unionized energy array
qsort( SD.unionized_energy_array, SD.length_unionized_energy_array, sizeof(double), double_compare);
// Allocate space to hold the acceleration grid indices
SD.length_index_grid = SD.length_unionized_energy_array * in.n_isotopes;
SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int));
assert(SD.index_grid != NULL);
nbytes += SD.length_index_grid * sizeof(int);
// Generates the double indexing grid
int * idx_low = (int *) calloc( in.n_isotopes, sizeof(int));
assert(idx_low != NULL );
double * energy_high = (double *) malloc( in.n_isotopes * sizeof(double));
assert(energy_high != NULL );
#P2
for( int i = 0; i < in.n_isotopes; i++ )
energy_high[i] = SD.nuclide_grid[i * in.n_gridpoints + 1].energy;
for( long e = 0; e < SD.length_unionized_energy_array; e++ )
{
for( long i = 0; i < in.n_isotopes; i++ )
{
double unionized_energy = SD.unionized_energy_array[e];
if( unionized_energy < energy_high[i] )
SD.index_grid[e * in.n_isotopes + i] = idx_low[i];
else if( idx_low[i] == in.n_gridpoints - 2 )
SD.index_grid[e * in.n_isotopes + i] = idx_low[i];
else
{
idx_low[i]++;
SD.index_grid[e * in.n_isotopes + i] = idx_low[i];
energy_high[i] = SD.nuclide_grid[i * in.n_gridpoints + idx_low[i] + 1].energy;
}
}
}
free(idx_low);
free(energy_high);
}
if( in.grid_type == HASH )
{
if(mype == 0) printf("Intializing hash grid...\n");
SD.length_unionized_energy_array = 0;
SD.length_index_grid = in.hash_bins * in.n_isotopes;
SD.index_grid = (int *) malloc( SD.length_index_grid * sizeof(int));
assert(SD.index_grid != NULL);
nbytes += SD.length_index_grid * sizeof(int);
double du = 1.0 / in.hash_bins;
// For each energy level in the hash table
#pragma omp parallel for
for( long e = 0; e < in.hash_bins; e++ )
{
double energy = e * du;
// We need to determine the bounding energy levels for all isotopes
for( long i = 0; i < in.n_isotopes; i++ )
{
SD.index_grid[e * in.n_isotopes + i] = grid_search_nuclide( in.n_gridpoints, energy, SD.nuclide_grid + i * in.n_gridpoints, 0, in.n_gridpoints-1);
}
}
}
////////////////////////////////////////////////////////////////////
// Initialize Materials and Concentrations
////////////////////////////////////////////////////////////////////
if(mype == 0) printf("Intializing material data...\n");
// Set the number of nuclides in each material
SD.num_nucs = load_num_nucs(in.n_isotopes);
SD.length_num_nucs = 12; // There are always 12 materials in XSBench
// Initialize the flattened 2D grid of material data. The grid holds
// a list of nuclide indices for each of the 12 material types. The
// grid is allocated as a full square grid, even though not all
// materials have the same number of nuclides.
SD.mats = load_mats(SD.num_nucs, in.n_isotopes, &SD.max_num_nucs);
SD.length_mats = SD.length_num_nucs * SD.max_num_nucs;
// Initialize the flattened 2D grid of nuclide concentration data. The grid holds
// a list of nuclide concentrations for each of the 12 material types. The
// grid is allocated as a full square grid, even though not all
// materials have the same number of nuclides.
SD.concs = load_concs(SD.num_nucs, SD.max_num_nucs);
SD.length_concs = SD.length_mats;
if(mype == 0) printf("Intialization complete. Allocated %.0lf MB of data.\n", nbytes/1024.0/1024.0 );
return SD;
}
|
opencl_sxc_fmt_plug.c | /*
* Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_sxc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_sxc);
#else
#include <string.h>
#include <stdint.h>
#include <openssl/blowfish.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "sha.h"
#include "aes.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "options.h"
#include "common.h"
#include "formats.h"
#include "common-opencl.h"
#define FORMAT_LABEL "sxc-opencl"
#define FORMAT_NAME "StarOffice .sxc"
#define FORMAT_TAG "$sxc$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL Blowfish"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(sxc_cpu_salt)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
typedef struct {
uint32_t length;
uint8_t v[20]; // hash of password
} sxc_password;
typedef struct {
uint32_t v[16/4];
} sxc_hash;
typedef struct {
uint32_t iterations;
uint32_t outlen;
uint32_t skip_bytes;
uint8_t length;
uint8_t salt[64];
} sxc_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[32 / sizeof(uint32_t)];
typedef struct {
int cipher_type;
int checksum_type;
int iterations;
int key_size;
int iv_length;
int salt_length;
int original_length;
int length;
unsigned char iv[16];
unsigned char salt[32];
unsigned char content[1024];
} sxc_cpu_salt;
static sxc_cpu_salt *cur_salt;
static struct fmt_tests sxc_tests[] = {
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdc
cb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b
553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"},
{NULL}
};
static cl_int cl_error;
static sxc_password *inbuffer;
static sxc_hash *outbuffer;
static sxc_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
static size_t insize, outsize, settingsize;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
// Upper bound on the OpenCL work-group size usable with crypt_kernel on the
// current device, as reported by the shared autotune helper.
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
// Allocate all host- and device-side buffers for a given global work size
// (gws) and bind the device buffers to the kernel arguments.  Called by the
// autotuner each time it probes a new gws; release_clobj() is its inverse.
static void create_clobj(size_t gws, struct fmt_main *self)
{
// Host-side sizes scale with the number of in-flight candidates.
insize = sizeof(sxc_password) * gws;
outsize = sizeof(sxc_hash) * gws;
settingsize = sizeof(sxc_salt);
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
saved_key = mem_calloc(gws, sizeof(*saved_key));
crypt_out = mem_calloc(gws, sizeof(*crypt_out));
/// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
// Kernel signature is derive_key(in, out, setting); args persist across runs.
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
// Release everything create_clobj() allocated.  crypt_out doubles as the
// "was create_clobj run?" flag, making the function safe to call twice.
static void release_clobj(void)
{
if (crypt_out) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(saved_key);
MEM_FREE(crypt_out);
}
}
// Format teardown hook: release buffers, kernel and program, but only if
// reset() actually built them (autotuned acts as the init refcount).
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
// Format init hook: remember our fmt_main and prepare the chosen device.
// Kernel build is deferred to reset() so runtime options are available.
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
// Build the PBKDF2-HMAC-SHA1 kernel (buffer sizes baked in via -D macros)
// and run the shared autotuner once.  sizeof(inbuffer->v) etc. are
// compile-time expressions, so the NULL inbuffer is never dereferenced.
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
(int)sizeof(inbuffer->v),
(int)sizeof(currentsalt.salt),
(int)sizeof(outbuffer->v));
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1,
self, create_clobj, release_clobj,
sizeof(sxc_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 1000);
}
}
// Validate one "$sxc$*..." ciphertext without modifying it.  Works on a
// strdup'd copy because strtokm() writes into its argument.  Returns 1 for a
// well-formed hash, 0 otherwise.  Field order mirrors get_salt()/get_binary().
// NOTE(review): the ishex() calls after hexlenl() look redundant (hexlenl
// already counts only hex digits) — kept for byte-identical behavior.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
int res, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
res = atoi(p);
if (res <= 0)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* key size */
goto err;
res = atoi(p);
if (res != 16 && res != 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */
goto err;
if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
goto err;
if (!ishex(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv length */
goto err;
res = atoi(p);
if (res <= 0 || res > 16)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
if (!ishex(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
goto err;
res = atoi(p);
if (res <= 0 || res > 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
if (!ishex(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* original length */
goto err;
res = atoi(p);
if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* length */
goto err;
res = atoi(p);
if (res <= 0 || res > 1024)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* content */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
if (strtokm(NULL, "*") != NULL) /* the end */
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static sxc_cpu_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += 6; /* skip over "$sxc$*" */
p = strtokm(ctcopy, "*");
cs.cipher_type = atoi(p);
p = strtokm(NULL, "*");
cs.checksum_type = atoi(p);
p = strtokm(NULL, "*");
cs.iterations = atoi(p);
p = strtokm(NULL, "*");
cs.key_size = atoi(p);
strtokm(NULL, "*");
/* skip checksum field */
p = strtokm(NULL, "*");
cs.iv_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.iv_length; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.salt_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.salt_length; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.original_length = atoi(p);
p = strtokm(NULL, "*");
cs.length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.length; i++)
cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
// Extract the 20-byte SHA-1 checksum (field 5 of the ciphertext) into a
// static, word-aligned buffer.  valid() guarantees the field is present and
// exactly BINARY_SIZE*2 hex digits long.
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy; // forces alignment for the binary-compare fast paths
} buf;
unsigned char *out = buf.c;
char *p;
int i;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN; /* skip over "$sxc$*" */
// Skip the first four fields: cipher type, checksum type, iterations, key size.
strtokm(ctcopy, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
p = strtokm(NULL, "*");
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return out;
}
// Install the current salt: keep a host-side pointer for the post-kernel
// Blowfish/SHA-1 stage and push the PBKDF2 parameters to the device.
// Fix: the address-of expression had been mangled to "¤tsalt" (an HTML
// entity artifact of "&currentsalt"), which does not compile.
static void set_salt(void *salt)
{
	cur_salt = (sxc_cpu_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
	currentsalt.length = cur_salt->salt_length;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->key_size;	// PBKDF2 output = Blowfish key size
	currentsalt.skip_bytes = 0;
	// Non-blocking write is safe: crypt_all enqueues on the same in-order queue.
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy salt to gpu");
}
#undef set_key
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
// Return the candidate stored by set_key() (already NUL-terminated).
static char *get_key(int index)
{
return saved_key[index];
}
// Hash all queued candidates against the current salt:
//   1. CPU (OpenMP): SHA-1 of each password — the PBKDF2 "password" input.
//   2. GPU: PBKDF2-HMAC-SHA1 key derivation (derive_key kernel).
//   3. CPU (OpenMP): Blowfish-CFB64 decrypt of the content, then SHA-1 of the
//      plaintext into crypt_out[] for the cmp_* functions.
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
size_t *lws = local_work_size ? &local_work_size : NULL;
// Pad the global size up to a multiple of the local size as OpenCL requires.
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
unsigned char hash[20];
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index]));
SHA1_Final((unsigned char *)hash, &ctx);
memcpy(inbuffer[index].v, hash, 20);
inbuffer[index].length = 20;
}
/// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
/// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]), "Run kernel");
/// Read the result back
// Blocking read (CL_TRUE) doubles as the synchronization point.
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");
// The autotuner only needs kernel timing; skip the CPU finish stage.
if (ocl_autotune_running)
return count;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
BF_KEY bf_key;
SHA_CTX ctx;
int bf_ivec_pos;
unsigned char ivec[8];
unsigned char output[1024];
bf_ivec_pos = 0;
memcpy(ivec, cur_salt->iv, 8);
BF_set_key(&bf_key, cur_salt->key_size, (const unsigned char*)outbuffer[index].v);
BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0);
SHA1_Init(&ctx);
// Checksum covers only the original (pre-padding) plaintext length.
SHA1_Update(&ctx, output, cur_salt->original_length);
SHA1_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
// Quick filter: compare only the first ARCH_SIZE bytes of each computed
// checksum; cmp_one() then performs the full BINARY_SIZE comparison.
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
// Full 20-byte comparison of one candidate against the target checksum.
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
// The binary already encodes the full checksum, so cmp_one is conclusive.
static int cmp_exact(char *source, int index)
{
return 1;
}
static unsigned int iteration_count(void *salt)
{
sxc_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
// Format descriptor registered with the John core: static parameters first,
// then the method table wired to the functions above.
struct fmt_main fmt_opencl_sxc = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{
// Tunable cost name, paired with iteration_count() below.
"iteration count",
},
{ FORMAT_TAG },
sxc_tests
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
core_zlaset.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
// for memset function
#include <string.h>
/***************************************************************************//**
*
* @ingroup core_laset
*
* Sets the elements of the matrix A on the diagonal
* to beta and on the off-diagonals to alpha
*
*******************************************************************************
*
* @param[in] uplo
* Specifies which elements of the matrix are to be set
* - PlasmaUpper: Upper part of A is set;
* - PlasmaLower: Lower part of A is set;
* - PlasmaUpperLower: ALL elements of A are set.
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] alpha
* The constant to which the off-diagonal elements are to be set.
*
* @param[in] beta
* The constant to which the diagonal elements are to be set.
*
* @param[in,out] A
* On entry, the m-by-n tile A.
* On exit, A has been set accordingly.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
******************************************************************************/
// Set off-diagonal elements of tile A to alpha and diagonal elements to beta
// (see the Doxygen block above for parameter details).
// NOTE(review): the fast path tests uplo == PlasmaGeneral while the docs
// above speak of PlasmaUpperLower — confirm these denote the same "all
// elements" case in plasma_types.h.
void core_zlaset(plasma_enum_t uplo, int m, int n,
plasma_complex64_t alpha, plasma_complex64_t beta,
plasma_complex64_t *A, int lda)
{
// Fast path: whole tile is zeroed and rows are contiguous (m == lda),
// so a single memset over m*n elements is valid.
if (alpha == 0.0 && beta == 0.0 && uplo == PlasmaGeneral && m == lda) {
// Use memset to zero continuous memory.
memset((void*)A, 0, (size_t)m*n*sizeof(plasma_complex64_t));
}
else {
// Use LAPACKE_zlaset_work to initialize the matrix.
LAPACKE_zlaset_work(LAPACK_COL_MAJOR, lapack_const(uplo),
m, n, alpha, beta, A, lda);
}
}
/******************************************************************************/
// OpenMP-task wrapper around core_zlaset: sets the m-by-n submatrix starting
// at row i, column j of an mb-by-nb tile A.  The depend(out) clause covers
// the whole tile so the runtime serializes writers of A correctly.
void core_omp_zlaset(plasma_enum_t uplo,
int mb, int nb,
int i, int j,
int m, int n,
plasma_complex64_t alpha, plasma_complex64_t beta,
plasma_complex64_t *A)
{
#pragma omp task depend(out:A[0:mb*nb])
core_zlaset(uplo, m, n,
alpha, beta,
A+i+j*mb, mb); // column-major offset into the tile; leading dim = mb
}
|
mandelbrot.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include "types.h"
/* Iterate z -> z^2 + c up to `iterations` times.  Returns 0 when the orbit
 * never leaves the escape radius (point treated as inside the set), and
 * otherwise a smoothed, fractional escape count used for coloring. */
static double in_mandelbrot_set(double complex c,
        long iterations, long escape_radius) {
    double complex z = 0;
    for (long step = 0; step < iterations; step++) {
        const double magnitude = cabs(z);
        if (magnitude > escape_radius) {
            /* Smooth coloring: offset the integer count by a log-log term. */
            return (double) step + 1 - log10(log10(magnitude)) / log10(2);
        }
        z = z * z + c;
    }
    return 0;
}
static struct rgb color(double i) {
struct rgb r;
i = fmod(i, 100.0);
if (i < 30) {
double norm = i / 30.0;
norm *= norm;
r.r = norm * 0x00;
r.g = norm * 0x00;
r.b = norm * 0x55;
} else if (i < 70) {
i -= 30.0;
double norm = i / 40.0;
r.r = norm * 0xaa;
r.g = norm * 0xaa;
r.b = norm * 0xaa + 0x55;
} else {
i -= 70.0;
double norm = i / 30.0;
r.r = (1.0-norm) * 0xaa;
r.g = (1.0-norm) * 0xaa;
r.b = (1.0-norm) * 0xff;
}
return r;
}
/* Render a Mandelbrot image of image_width x image_height pixels covering the
 * complex rectangle [xmin,xmax] x [ymin,ymax] and write raw RGB rows (3 bytes
 * per pixel) to output_file.  Rows are computed in parallel per pixel; rows
 * themselves are emitted sequentially, top (ymax) first.
 * Fix: the malloc result was previously used unchecked. */
void generate(FILE *output_file, int image_width, int image_height,
              double xmin, double xmax, double ymin, double ymax,
              long escape_radius, long iterations) {
    struct rgb *line = malloc(image_width * sizeof(struct rgb));
    if (line == NULL) {
        fprintf(stderr, "generate: failed to allocate scanline buffer\n");
        return;
    }
    for (int y = 0; y < image_height; y++) {
#pragma omp parallel for
        for (int x = 0; x < image_width; x++) {
            /* map pixels to complex numbers according to given ranges then test
               if they're in the set */
            double real = (double)x/(double)image_width * (xmax-xmin) + xmin;
            double imag = (1-(double)y/(double)image_height) * (ymax-ymin) + ymin;
            double i = in_mandelbrot_set(real + I*imag, iterations,
                                         escape_radius);
            line[x] = color(i);
        }
        /* 3 bytes per pixel — assumes struct rgb is packed r,g,b bytes;
           TODO confirm against types.h */
        fwrite(line, 3, image_width, output_file);
    }
    free(line);
}
|
GB_unaryop__identity_bool_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_uint16
// op(A') function: GB_tran__identity_bool_uint16
// C type: bool
// A type: uint16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (bool) Ax: apply the IDENTITY operator with a uint16_t -> bool cast,
// element by element, using a static OpenMP schedule over anz entries.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__identity_bool_uint16
(
bool *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: aij = Ax [p]; Cx [p] = (bool) aij  (see GB_CAST_OP above)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (bool) A': transpose A, cast uint16_t -> bool, and apply IDENTITY.
// The loop body lives in the shared template GB_unaryop_transpose.c, driven
// by the GB_* macros defined above; this is phase 2 of the two-phase
// transpose (Rowcounts/A_slice were computed in phase 1).
GrB_Info GB_tran__identity_bool_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store *x - *y into *result, normalizing tv_usec into [0, 1000000).
 * Returns 1 if the difference is negative, 0 otherwise.
 * Note: *y is used as scratch space and is modified. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so x->tv_usec - y->tv_usec >= 0. */
    if (x->tv_usec < y->tv_usec) {
        const int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry any surplus whole seconds out of the microsecond field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        const int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the per-field subtraction cannot underflow usec. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 16;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
adjointnavierstokes_avx.h | //*****************************************************************************
// Title : src/equation_avx/adjointnavierstokes_avx.h
// Author : Tanabe Yuta
// Date : 2021/02/11
// Copyright : (C)2021 TanabeYuta
//*****************************************************************************
#pragma once
#include <cmath>
#include <cassert>
#include <numeric>
#include <immintrin.h>
// compile option for g++(MinGW) : -mavx
namespace PANSLBM2 {
namespace ANS {
template<class T, template<class>class P>void Macro(T &, T &, T &, T &, T &, T, T, T, const T *, const T *, int); // Function of updating macroscopic values of ANS for 2D
template<class T, template<class>class P>void Macro(T &, T &, T &, T &, T &, T &, T &, T, T, T, T, const T *, const T *, int); // Function of updating macroscopic values of ANS for 3D
template<class T, template<class>class P>void Equilibrium(T *, T, T, T, T, T); // Function of getting equilibrium of ANS for 2D
template<class T, template<class>class P>void Equilibrium(T *, T, T, T, T, T, T, T); // Function of getting equilibrium of ANS for 3D
template<class T, template<class>class P>void ExternalForceBrinkman(T, T, T, T, T, T *, T *, T, int); // Function of applying external force with Brinkman model of ANS for 2D
template<class T, template<class>class P>void ExternalForceBrinkman(T, T, T, T, T, T, T, T *, T *, T, int); // Function of applying external force with Brinkman model of ANS for 3D
// Update the 2D adjoint macroscopic values (AVX, 4 double lanes at once).
// Accumulates over all lattice directions c:
//   __ip        : sum_c f_c e_c (1 - 1.5|u|^2 + 3 c.u + 4.5 (c.u)^2)
//   __iux/__iuy : sum_c f_c e_c (c_a + 3 (c.u) c_a - u_a)
//   __imx/__imy : sum_c f_c e_c c_a   (first moments, used by the force term)
// __ux/__uy are the forward-problem velocity and __f the adjoint distributions.
// NOTE(review): __rho is accepted but never read in this overload.
template<class P>
void Macro(__m256d &__ip, __m256d &__iux, __m256d &__iuy, __m256d &__imx, __m256d &__imy, const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d *__f) {
    __ip = _mm256_setzero_pd();
    __iux = _mm256_setzero_pd();
    __iuy = _mm256_setzero_pd();
    __imx = _mm256_setzero_pd();
    __imy = _mm256_setzero_pd();
    // 1 - 1.5*(ux^2 + uy^2): velocity-magnitude part shared by every direction
    __m256d __1uu = _mm256_sub_pd(_mm256_set1_pd(1.0), _mm256_mul_pd(_mm256_set1_pd(1.5), _mm256_add_pd(_mm256_mul_pd(__ux, __ux), _mm256_mul_pd(__uy, __uy))));
    for (int c = 0; c < P::nc; ++c) {
        // f_c * e_c (lattice weight), reused by every accumulation below
        __m256d __fei = _mm256_mul_pd(__f[c], P::__ei[c]);
        // c . u for direction c
        __m256d __cu = _mm256_add_pd(_mm256_mul_pd(P::__cx[c], __ux), _mm256_mul_pd(P::__cy[c], __uy));
        __ip = _mm256_add_pd(__ip, _mm256_mul_pd(__fei, _mm256_add_pd(__1uu, _mm256_add_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), __cu), _mm256_mul_pd(_mm256_set1_pd(4.5), _mm256_mul_pd(__cu, __cu))))));
        __iux = _mm256_add_pd(__iux, _mm256_mul_pd(__fei, _mm256_add_pd(P::__cx[c], _mm256_sub_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_mul_pd(__cu, P::__cx[c])), __ux))));
        __iuy = _mm256_add_pd(__iuy, _mm256_mul_pd(__fei, _mm256_add_pd(P::__cy[c], _mm256_sub_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_mul_pd(__cu, P::__cy[c])), __uy))));
        __imx = _mm256_add_pd(__imx, _mm256_mul_pd(__fei, P::__cx[c]));
        __imy = _mm256_add_pd(__imy, _mm256_mul_pd(__fei, P::__cy[c]));
    }
}
// Update the 3D adjoint macroscopic values (AVX, 4 double lanes at once).
// Same structure as the 2D overload with an added z component:
//   __ip  : sum_c f_c e_c (1 + 3 c.u + 4.5 (c.u)^2 - 1.5 |u|^2)
//   __iu* : sum_c f_c e_c (c_a + 3 (c.u) c_a - u_a)
//   __im* : sum_c f_c e_c c_a   (first moments, used by the force term)
// NOTE(review): __rho is accepted but never read in this overload.
template<class P>
void Macro(__m256d &__ip, __m256d &__iux, __m256d &__iuy, __m256d &__iuz, __m256d &__imx, __m256d &__imy, __m256d &__imz, const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__uz, const __m256d *__f) {
    __ip = _mm256_setzero_pd();
    __iux = _mm256_setzero_pd();
    __iuy = _mm256_setzero_pd();
    __iuz = _mm256_setzero_pd();
    __imx = _mm256_setzero_pd();
    __imy = _mm256_setzero_pd();
    __imz = _mm256_setzero_pd();
    // |u|^2 = ux^2 + uy^2 + uz^2
    __m256d __uu = _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(__ux, __ux), _mm256_mul_pd(__uy, __uy)), _mm256_mul_pd(__uz, __uz));
    // numeric constants hoisted out of the direction loop
    __m256d __1 = _mm256_set1_pd(1.0), __3 = _mm256_set1_pd(3.0), __45 = _mm256_set1_pd(4.5), __15 = _mm256_set1_pd(1.5);
    for (int c = 0; c < P::nc; ++c) {
        // f_c * e_c (lattice weight)
        __m256d __fei = _mm256_mul_pd(__f[c], P::__ei[c]);
        // c . u for direction c
        __m256d __cu = _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(P::__cx[c], __ux), _mm256_mul_pd(P::__cy[c], __uy)), _mm256_mul_pd(P::__cz[c], __uz));
        __ip = _mm256_add_pd(__ip, _mm256_mul_pd(__fei, _mm256_add_pd(__1, _mm256_add_pd(_mm256_mul_pd(__3, __cu), _mm256_sub_pd(_mm256_mul_pd(__45, _mm256_mul_pd(__cu, __cu)), _mm256_mul_pd(__15, __uu))))));
        __iux = _mm256_add_pd(__iux, _mm256_mul_pd(__fei, _mm256_add_pd(P::__cx[c], _mm256_sub_pd(_mm256_mul_pd(__3, _mm256_mul_pd(__cu, P::__cx[c])), __ux))));
        __iuy = _mm256_add_pd(__iuy, _mm256_mul_pd(__fei, _mm256_add_pd(P::__cy[c], _mm256_sub_pd(_mm256_mul_pd(__3, _mm256_mul_pd(__cu, P::__cy[c])), __uy))));
        __iuz = _mm256_add_pd(__iuz, _mm256_mul_pd(__fei, _mm256_add_pd(P::__cz[c], _mm256_sub_pd(_mm256_mul_pd(__3, _mm256_mul_pd(__cu, P::__cz[c])), __uz))));
        __imx = _mm256_add_pd(__imx, _mm256_mul_pd(__fei, P::__cx[c]));
        __imy = _mm256_add_pd(__imy, _mm256_mul_pd(__fei, P::__cy[c]));
        __imz = _mm256_add_pd(__imz, _mm256_mul_pd(__fei, P::__cz[c]));
    }
}
// Equilibrium distribution of the 2D adjoint system (AVX, 4 lanes):
//   feq_c = ip + 3 * ( iux*(cx_c - ux) + iuy*(cy_c - uy) )
template<class P>
void Equilibrium(__m256d *__feq, const __m256d &__ux, const __m256d &__uy, const __m256d &__ip, const __m256d &__iux, const __m256d &__iuy) {
    const __m256d __three = _mm256_set1_pd(3.0);
    for (int c = 0; c < P::nc; ++c) {
        // relative lattice velocity components (c - u)
        __m256d __dcx = _mm256_sub_pd(P::__cx[c], __ux);
        __m256d __dcy = _mm256_sub_pd(P::__cy[c], __uy);
        // iu . (c - u)
        __m256d __dot = _mm256_add_pd(_mm256_mul_pd(__iux, __dcx), _mm256_mul_pd(__iuy, __dcy));
        __feq[c] = _mm256_add_pd(__ip, _mm256_mul_pd(__three, __dot));
    }
}
// Equilibrium distribution of the 3D adjoint system (AVX, 4 lanes):
//   feq_c = ip + 3 * ( iux*(cx_c - ux) + iuy*(cy_c - uy) + iuz*(cz_c - uz) )
template<class P>
void Equilibrium(__m256d *__feq, const __m256d &__ux, const __m256d &__uy, const __m256d &__uz, const __m256d &__ip, const __m256d &__iux, const __m256d &__iuy, const __m256d &__iuz) {
    const __m256d __three = _mm256_set1_pd(3.0);
    for (int c = 0; c < P::nc; ++c) {
        // per-axis products iu_a * (c_a - u_a)
        __m256d __px = _mm256_mul_pd(__iux, _mm256_sub_pd(P::__cx[c], __ux));
        __m256d __py = _mm256_mul_pd(__iuy, _mm256_sub_pd(P::__cy[c], __uy));
        __m256d __pz = _mm256_mul_pd(__iuz, _mm256_sub_pd(P::__cz[c], __uz));
        // summed in the same (x+y)+z order as the original expression
        __m256d __dot = _mm256_add_pd(_mm256_add_pd(__px, __py), __pz);
        __feq[c] = _mm256_add_pd(__ip, _mm256_mul_pd(__three, __dot));
    }
}
// Brinkman friction force for the 2D adjoint system (AVX, 4 lanes).
// With coef = 3*alpha/(rho + alpha): the rest population f[0] gains
// coef*(u . im) and every moving population c loses coef*((c - u) . im).
template<class P>
void ExternalForceBrinkman(const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__imx, const __m256d &__imy, __m256d *__f, const __m256d &__alpha) {
    __m256d __coef = _mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_div_pd(__alpha, _mm256_add_pd(__rho, __alpha)));
    // u . im for the rest direction
    __m256d __udotm = _mm256_add_pd(_mm256_mul_pd(__ux, __imx), _mm256_mul_pd(__uy, __imy));
    __f[0] = _mm256_add_pd(__f[0], _mm256_mul_pd(__coef, __udotm));
    for (int c = 1; c < P::nc; ++c) {
        // (c - u) . im, accumulated per axis
        __m256d __rx = _mm256_mul_pd(_mm256_sub_pd(P::__cx[c], __ux), __imx);
        __m256d __ry = _mm256_mul_pd(_mm256_sub_pd(P::__cy[c], __uy), __imy);
        __f[c] = _mm256_sub_pd(__f[c], _mm256_mul_pd(__coef, _mm256_add_pd(__rx, __ry)));
    }
}
// Brinkman friction force for the 3D adjoint system (AVX, 4 lanes).
// With coef = 3*alpha/(rho + alpha): the rest population f[0] gains
// coef*(u . im) and every moving population c loses coef*((c - u) . im).
template<class P>
void ExternalForceBrinkman(const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__uz, const __m256d &__imx, const __m256d &__imy, const __m256d &__imz, __m256d *__f, const __m256d &__alpha) {
    __m256d __coef = _mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_div_pd(__alpha, _mm256_add_pd(__rho, __alpha)));
    // u . im for the rest direction, summed (x+y)+z as in the original
    __m256d __udotm = _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(__ux, __imx), _mm256_mul_pd(__uy, __imy)), _mm256_mul_pd(__uz, __imz));
    __f[0] = _mm256_add_pd(__f[0], _mm256_mul_pd(__coef, __udotm));
    for (int c = 1; c < P::nc; ++c) {
        // (c - u) . im, accumulated per axis
        __m256d __rx = _mm256_mul_pd(_mm256_sub_pd(P::__cx[c], __ux), __imx);
        __m256d __ry = _mm256_mul_pd(_mm256_sub_pd(P::__cy[c], __uy), __imy);
        __m256d __rz = _mm256_mul_pd(_mm256_sub_pd(P::__cz[c], __uz), __imz);
        __f[c] = _mm256_sub_pd(__f[c], _mm256_mul_pd(__coef, _mm256_add_pd(_mm256_add_pd(__rx, __ry), __rz)));
    }
}
// Fused update for the 2D adjoint system: recompute macroscopic values,
// apply the Brinkman external force, then perform a BGK collision.
// The first ne*packsize cells are processed 4 at a time with AVX; the
// remainder falls back to the scalar overloads declared at the top of
// this namespace.
//   _p            : lattice object providing f0/f, LoadF/StoreF, IndexF
//   _rho/_ux/_uy  : forward-problem macroscopic fields (read only)
//   _ip.._imy     : adjoint macroscopic outputs, written only when _issave
//   _viscosity    : sets the BGK relaxation rate omega = 1/(3*nu + 0.5)
//   _alpha        : Brinkman inverse-permeability field
template<template<class>class P>
void MacroBrinkmanCollide(
    P<double>& _p, const double *_rho, const double *_ux, const double *_uy, 
    double *_ip, double *_iux, double *_iuy, double *_imx, double *_imy, 
    double _viscosity, const double *_alpha, bool _issave = false
) {
    const int ne = _p.nxyz/P<double>::packsize;  // number of full AVX packs
    double omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<double>::nc];
    __m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
    #pragma omp parallel for private(__feq)
    for (int pidx = 0; pidx < ne; ++pidx) {
        int idx = pidx*P<double>::packsize;
        //  Pack f0 and f
        __m256d __f[P<double>::nc];
        _p.LoadF(idx, __f);
        //  Update macro (first pass: supplies __imx/__imy for the force term)
        __m256d __ip, __iux, __iuy, __imx, __imy;
        __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]);
        Macro<P<double> >(__ip, __iux, __iuy, __imx, __imy, __rho, __ux, __uy, __f);
        //  External force with Brinkman model
        __m256d __alpha = _mm256_loadu_pd(&_alpha[idx]);
        ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __imx, __imy, __f, __alpha);
        //  Recompute macro so saved/collided values include the force
        Macro<P<double> >(__ip, __iux, __iuy, __imx, __imy, __rho, __ux, __uy, __f);
        //  Save macro if need
        if (_issave) {
            _mm256_storeu_pd(&_ip[idx], __ip);
            _mm256_storeu_pd(&_iux[idx], __iux);
            _mm256_storeu_pd(&_iuy[idx], __iuy);
            _mm256_storeu_pd(&_imx[idx], __imx);
            _mm256_storeu_pd(&_imy[idx], __imy);
        }
        //  Collide: f <- (1-omega)*f + omega*feq
        Equilibrium<P<double> >(__feq, __ux, __uy, __ip, __iux, __iuy);
        for (int c = 0; c < P<double>::nc; ++c) {
            __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
        }
        _p.StoreF(idx, __f);
    }
    //  Scalar tail for cells that do not fill a whole AVX pack
    for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
        //  Update macro
        double ip, iux, iuy, imx, imy;
        Macro<double, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
        //  External force with Brinkman model
        ExternalForceBrinkman<double, P>(_rho[idx], _ux[idx], _uy[idx], imx, imy, _p.f0, _p.f, _alpha[idx], idx);
        Macro<double, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
        //  Save macro if need
        if (_issave) {
            _ip[idx] = ip;
            _iux[idx] = iux;
            _iuy[idx] = iuy;
            _imx[idx] = imx;
            _imy[idx] = imy;
        }
        //  Collide
        Equilibrium<double, P>(feq, _ux[idx], _uy[idx], ip, iux, iuy);
        _p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
        for (int c = 1; c < P<double>::nc; ++c) {
            int idxf = P<double>::IndexF(idx, c);
            _p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
        }
    }
}
// Fused update for the 3D adjoint system: recompute macroscopic values,
// apply the Brinkman external force, then perform a BGK collision.
// Vectorized body (4 cells per AVX pack) plus a scalar tail, exactly as
// in the 2D overload above but with z components added.
//   _p              : lattice object providing f0/f, LoadF/StoreF, IndexF
//   _rho/_ux.._uz   : forward-problem macroscopic fields (read only)
//   _ip.._imz       : adjoint macroscopic outputs, written only when _issave
//   _viscosity      : sets the BGK relaxation rate omega = 1/(3*nu + 0.5)
//   _alpha          : Brinkman inverse-permeability field
template<template<class>class P>
void MacroBrinkmanCollide(
    P<double>& _p, const double *_rho, const double *_ux, const double *_uy, const double *_uz,
    double *_ip, double *_iux, double *_iuy, double *_iuz, double *_imx, double *_imy, double *_imz,  
    double _viscosity, const double *_alpha, bool _issave = false
) {
    const int ne = _p.nxyz/P<double>::packsize;  // number of full AVX packs
    double omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<double>::nc];
    __m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
    #pragma omp parallel for private(__feq)
    for (int pidx = 0; pidx < ne; ++pidx) {
        int idx = pidx*P<double>::packsize;
        //  Pack f0 and f
        __m256d __f[P<double>::nc];
        _p.LoadF(idx, __f);
        //  Update macro (first pass: supplies __im* for the force term)
        __m256d __ip, __iux, __iuy, __iuz, __imx, __imy, __imz;
        __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]), __uz = _mm256_loadu_pd(&_uz[idx]);
        Macro<P<double> >(__ip, __iux, __iuy, __iuz, __imx, __imy, __imz, __rho, __ux, __uy, __uz, __f);
        //  External force with Brinkman model
        __m256d __alpha = _mm256_loadu_pd(&_alpha[idx]);
        ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __uz, __imx, __imy, __imz, __f, __alpha);
        //  Recompute macro so saved/collided values include the force
        Macro<P<double> >(__ip, __iux, __iuy, __iuz, __imx, __imy, __imz, __rho, __ux, __uy, __uz, __f);
        //  Save macro if need
        if (_issave) {
            _mm256_storeu_pd(&_ip[idx], __ip);
            _mm256_storeu_pd(&_iux[idx], __iux);
            _mm256_storeu_pd(&_iuy[idx], __iuy);
            _mm256_storeu_pd(&_iuz[idx], __iuz);
            _mm256_storeu_pd(&_imx[idx], __imx);
            _mm256_storeu_pd(&_imy[idx], __imy);
            _mm256_storeu_pd(&_imz[idx], __imz);
        }
        //  Collide: f <- (1-omega)*f + omega*feq
        Equilibrium<P<double> >(__feq, __ux, __uy, __uz, __ip, __iux, __iuy, __iuz);
        for (int c = 0; c < P<double>::nc; ++c) {
            __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
        }
        _p.StoreF(idx, __f);
    }
    //  Scalar tail for cells that do not fill a whole AVX pack
    for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
        //  Update macro
        double ip, iux, iuy, iuz, imx, imy, imz;
        Macro<double, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
        //  External force with Brinkman model
        ExternalForceBrinkman<double, P>(_rho[idx], _ux[idx], _uy[idx], _uz[idx], imx, imy, imz, _p.f0, _p.f, _alpha[idx], idx);
        Macro<double, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
        //  Save macro if need
        if (_issave) {
            _ip[idx] = ip;
            _iux[idx] = iux;
            _iuy[idx] = iuy;
            _iuz[idx] = iuz;
            _imx[idx] = imx;
            _imy[idx] = imy;
            _imz[idx] = imz;
        }
        //  Collide and stream
        Equilibrium<double, P>(feq, _ux[idx], _uy[idx], _uz[idx], ip, iux, iuy, iuz);
        _p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
        for (int c = 1; c < P<double>::nc; ++c) {
            int idxf = P<double>::IndexF(idx, c);
            _p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
        }
    }
}
}
} |
Fig_5.9_piloop.c | #include <stdio.h>
#include <omp.h>
#define NTHREADS 4

static long num_steps = 100000000;  /* number of midpoint rectangles */
double step;                        /* width of one rectangle */

/*
 * Estimate pi by midpoint integration of 4/(1+x^2) over [0,1],
 * parallelized with an OpenMP worksharing loop and a sum reduction.
 *
 * Fixes: the outer-scope `x` was always shadowed by the parallel
 * region's private copy (hence never used) and is removed; main now
 * returns an explicit status.
 */
int main()
{
    double pi, sum = 0.0;
    double start_time, run_time;
    int i;

    step = 1.0 / (double) num_steps;

    omp_set_num_threads(NTHREADS);
    start_time = omp_get_wtime();
    #pragma omp parallel
    {
        /* private abscissa; the loop index i is privatized by `omp for` */
        double x;
        #pragma omp for reduction(+:sum)
        for (i = 0; i < num_steps; i++) {
            x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
        }
    }
    pi = step * sum;
    run_time = omp_get_wtime() - start_time;
    printf("pi is %f in %f seconds \n", pi, run_time);
    return 0;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Compute *result = *x - *y for two `struct timeval` values.
 * The value at *y is used as scratch space during normalization and
 * may be modified.  Returns 1 when the difference is negative,
 * otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microsecond field so that
   * x->tv_usec >= y->tv_usec holds afterwards. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Push any surplus whole seconds out of the microsecond gap. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* With the fields normalized, the difference is component-wise and
   * tv_usec is guaranteed non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* The sign of the difference is carried by the seconds field alone. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-4 (25-point) constant-coefficient 3D stencil.
 * Usage: prog Nx Ny Nz [Nt]; interior sizes get a 4-deep halo added.
 *
 * Fixes over the original:
 *  - Nx/Ny/Nz/Nt defaulted instead of being read uninitialized when
 *    arguments are missing (was undefined behavior);
 *  - roc2 is allocated once (the 1-element placeholder malloc that was
 *    immediately overwritten, and leaked, is gone);
 *  - initialization covers all cells including the halo and the second
 *    time plane A[1] (the stencil reads halo indices 0..3 and the whole
 *    A[(t+1)%2] plane at t=0, which were previously uninitialized);
 *  - tile_size and the top-level A pointer are freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* Grid dimensions = requested interior + 2*4 halo points.
   * Small defaults keep the program defined when arguments are absent. */
  Nx = Ny = Nz = 32 + 8;
  Nt = 8;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Allocate the two time planes and the velocity-squared coefficient. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 32;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize every cell, including the halo and the second time
   * plane, so the first sweep reads only defined values. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-4 finite-difference coefficients of the wave operator. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (tile_size and the top-level A pointer were
   * previously leaked). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);
  return 0;
}
|
GB_binop__bshift_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bshift_uint8
// A.*B function (eWiseMult): GB_AemultB__bshift_uint8
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bshift_uint8
// C+=b function (dense accum): GB_Cdense_accumb__bshift_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_uint8
// C=scalar+B GB_bind1st__bshift_uint8
// C=scalar+B' GB_bind1st_tran__bshift_uint8
// C=A+scalar GB_bind2nd__bshift_uint8
// C=A'+scalar GB_bind2nd_tran__bshift_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint8 (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_bitshift_uint8 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT8 || GxB_NO_BSHIFT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; the "+" here is
// GB_bitshift_uint8.  The numeric loop comes from the included
// template, specialized via the GB_* macros defined at the top of
// this (auto-generated) file.
GrB_Info GB_Cdense_ewise3_noaccum__bshift_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #if GB_DISABLE
    // this operator/type combination is disabled at compile time;
    // the caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse, accumulating with
// GB_bitshift_uint8.  The kfirst/klast/pstart slices partition B's
// entries over ntasks parallel tasks; the loop itself comes from the
// included template.
GrB_Info GB_Cdense_accumB__bshift_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (of B's type, int8_t),
// accumulating with GB_bitshift_uint8.  The loop comes from the
// included template.
// Fix: the original returned GrB_SUCCESS inside the scoped block and
// again immediately after it; the second return was unreachable.  A
// single return now follows the block.
GrB_Info GB_Cdense_accumb__bshift_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with GB_bitshift_uint8 as the "+".
// Work is pre-split into ntasks slices described by TaskList and the
// C_to_* maps; the numeric loop comes from the included template.
GrB_Info GB_AaddB__bshift_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with GB_bitshift_uint8 as the
// elementwise operator.  Same task-slicing scheme as eWiseAdd above;
// the numeric loop comes from the included template.
GrB_Info GB_AemultB__bshift_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = GB_bitshift_uint8 (x, Bx [p]) for all p in 0..anz-1:
// apply the operator with the scalar bound as the first argument.
GrB_Info GB_bind1st__bshift_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // recover the concrete types from the type-erased arguments
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        int8_t bij = Bx [p] ;
        Cx [p] = GB_bitshift_uint8 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = GB_bitshift_uint8 (Ax [p], y) for all p in 0..anz-1:
// apply the operator with the scalar bound as the second argument.
GrB_Info GB_bind2nd__bshift_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // recover the concrete types from the type-erased arguments
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        uint8_t aij = Ax [p] ;
        Cx [p] = GB_bitshift_uint8 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint8 (x, aij) ; \
}
// C = op (x, A'): transpose A while applying
// cij = GB_bitshift_uint8 (x, aij) via the GB_CAST_OP macro defined
// just above; the traversal comes from the included transpose template.
GrB_Info GB_bind1st_tran__bshift_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y), so
    // temporarily rebind GB_ATYPE to B's type (int8_t).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of the file
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint8 (aij, y) ; \
}
// C = op (A', y): transpose A while applying
// cij = GB_bitshift_uint8 (aij, y) via the GB_CAST_OP macro defined
// just above; the traversal comes from the included transpose template.
GrB_Info GB_bind2nd_tran__bshift_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
kernel_so4_acoustic.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "ittnotify.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
/* Type-erased array descriptor handed to the generated kernel; `data`
 * is cast to a concretely-typed multidimensional array pointer on
 * entry (see the casts at the top of Kernel, which use size[1..3]). */
struct dataobj
{
  void *restrict data; /* base pointer of the array payload */
  int *size;           /* per-dimension allocated extents (used in the casts) */
  int *npsize;         /* NOTE(review): presumably non-padded sizes -- confirm with generator */
  int *dsize;          /* NOTE(review): presumably domain sizes -- confirm */
  int *hsize;          /* NOTE(review): presumably halo sizes -- confirm */
  int *hofs;           /* NOTE(review): presumably halo offsets -- confirm */
  int *oofs;           /* NOTE(review): presumably owned-region offsets -- confirm */
};
/* Time-tiled ("wavefront") finite-difference kernel, generated in the
   style of the Devito DSL.  Advances the wavefield USOL over
   [time_m, time_M] with a stencil that is 4th-order in space and
   2nd-order in time, applies sponge damping DAMP, and injects sparse
   sources through the precomputed mask/index arrays.  The x/y tile
   bounds are skewed by `time` and every array access compensates with
   the matching `- time` offset.  Returns 0.

   NOTE(review): TIMERS is accepted for interface compatibility but not
   read or written here.  */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
  /* Reshape the opaque data blobs into concretely-dimensioned arrays.  */
  int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data;
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_vec->size[1]])save_src_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  float(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  float(*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float(*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  __itt_resume();

  /* block_sizes holds {x tile, y tile, x block, y block}.  */
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];
  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);

  int sf = 2; /* time-skewing factor */
  int t_blk_size = 2 * sf * (time_M - time_m);

  /* These factors depend only on dt; previously they were recomputed as
     r13/r12 on every z iteration of the innermost SIMD loop.  Hoist them
     once for the whole kernel (same expressions, so bitwise-identical
     results).  */
  const float r13 = 1.0 / dt;
  const float r12 = 1.0 / (dt * dt);

  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3))
        {
          /* Wrapped time index used for the source wavelet lookup.  */
          int tw = ((time / sf) % (time_M - time_m + 1));
          #pragma omp parallel num_threads(nthreads)
          {
            #pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
              {
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
                    /* 4th-order Laplacian + damping + 2nd-order time update.  */
                    #pragma omp simd aligned(damp, usol, vp : 32)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      float r14 = -2.5F * usol[t1][x - time + 4][y - time + 4][z + 4];
                      float r11 = 1.0 / (vp[x - time + 4][y - time + 4][z + 4] * vp[x - time + 4][y - time + 4][z + 4]);
                      usol[t0][x - time + 4][y - time + 4][z + 4] = (r11 * (-r12 * (-2.0F * usol[t1][x - time + 4][y - time + 4][z + 4] + usol[t2][x - time + 4][y - time + 4][z + 4])) + r13 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 4][y - time + 4][z + 4]) + (r14 - 8.33333333e-2F * (usol[t1][x - time + 4][y - time + 4][z + 2] + usol[t1][x - time + 4][y - time + 4][z + 6]) + 1.33333333F * (usol[t1][x - time + 4][y - time + 4][z + 3] + usol[t1][x - time + 4][y - time + 4][z + 5])) / ((h_z * h_z)) + (r14 - 8.33333333e-2F * (usol[t1][x - time + 4][y - time + 2][z + 4] + usol[t1][x - time + 4][y - time + 6][z + 4]) + 1.33333333F * (usol[t1][x - time + 4][y - time + 3][z + 4] + usol[t1][x - time + 4][y - time + 5][z + 4])) / ((h_y * h_y)) + (r14 - 8.33333333e-2F * (usol[t1][x - time + 2][y - time + 4][z + 4] + usol[t1][x - time + 6][y - time + 4][z + 4]) + 1.33333333F * (usol[t1][x - time + 3][y - time + 4][z + 4] + usol[t1][x - time + 5][y - time + 4][z + 4])) / ((h_x * h_x))) / (r11 * r12 + r13 * damp[x - time + 1][y - time + 1][z + 1]);
                    }
                    /* Inject the sources registered for this (x, y) column.
                       NOTE(review): the simd pragma on this indirect-store
                       loop presumes the zind values are distinct per column
                       — confirm against the mask generator.  */
                    #pragma omp simd aligned(damp, usol, vp : 32)
                    for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
                    {
                      int zind = sp_source_mask[x - time][y - time][sp_zi];
                      /* tw replaces the previously duplicated inline
                         ((time / sf) % (time_M - time_m + 1)) expression.  */
                      float r0 = save_src[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      usol[t0][x - time + 4][y - time + 4][zind + 4] += r0;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* End section.  */
  __itt_pause();
  return 0;
}
/* OMP constructs' SIMD clone supporting code.
Copyright (C) 2005-2018 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "langhooks.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "cfgloop.h"
#include "symbol-summary.h"
#include "ipa-param-manipulation.h"
#include "tree-eh.h"
#include "varasm.h"
#include "stringpool.h"
#include "attribs.h"
#include "omp-simd-clone.h"
/* Return the number of elements in vector type VECTYPE, which is associated
with a SIMD clone. At present these always have a constant length. */
static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
  /* SIMD clones are only created with fixed-length vectors, so the
     poly_int element count can always be forced to a constant.  */
  return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Allocate a fresh `simd_clone' and return it. NARGS is the number
of arguments to reserve space for. */
static struct cgraph_simd_clone *
simd_clone_struct_alloc (int nargs)
{
  /* The struct ends with a trailing array of per-argument descriptors,
     so the allocation size depends on NARGS.  GC-allocated and zeroed.  */
  size_t total = sizeof (struct cgraph_simd_clone)
		 + nargs * sizeof (struct cgraph_simd_clone_arg);
  return (struct cgraph_simd_clone *) ggc_internal_cleared_alloc (total);
}
/* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
static inline void
simd_clone_struct_copy (struct cgraph_simd_clone *to,
			struct cgraph_simd_clone *from)
{
  /* Copy the header plus the used portion of the trailing argument
     array (the in-branch mask slot, if any, is excluded).  */
  size_t nbytes = sizeof (struct cgraph_simd_clone);
  nbytes += (from->nargs - from->inbranch)
	    * sizeof (struct cgraph_simd_clone_arg);
  memcpy (to, from, nbytes);
}
/* Return vector of parameter types of function FNDECL. This uses
TYPE_ARG_TYPES if available, otherwise falls back to types of
DECL_ARGUMENTS types. */
static vec<tree>
simd_clone_vector_of_formal_parm_types (tree fndecl)
{
  /* A prototyped declaration carries its parameter types directly.  */
  if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
    return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));

  /* Otherwise collect the PARM_DECLs and replace each entry in place
     with the decl's type.  */
  vec<tree> parms = ipa_get_vector_of_formal_parms (fndecl);
  for (unsigned int ix = 0; ix < parms.length (); ix++)
    parms[ix] = TREE_TYPE (parms[ix]);
  return parms;
}
/* Given a simd function in NODE, extract the simd specific
information from the OMP clauses passed in CLAUSES, and return
the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
is set to TRUE if the `inbranch' or `notinbranch' clause specified,
otherwise set to FALSE. */
static struct cgraph_simd_clone *
simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
			    bool *inbranch_specified)
{
  vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
  tree t;
  int n;
  *inbranch_specified = false;

  n = args.length ();
  /* Drop the trailing void_type_node that terminates a prototyped
     argument list.  */
  if (n > 0 && args.last () == void_type_node)
    n--;

  /* Allocate one more than needed just in case this is an in-branch
     clone which will require a mask argument.  */
  struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
  clone_info->nargs = n;

  if (!clauses)
    goto out;
  clauses = TREE_VALUE (clauses);
  if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
    goto out;

  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      switch (OMP_CLAUSE_CODE (t))
	{
	case OMP_CLAUSE_INBRANCH:
	  clone_info->inbranch = 1;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_NOTINBRANCH:
	  clone_info->inbranch = 0;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_SIMDLEN:
	  clone_info->simdlen
	    = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
	  break;
	case OMP_CLAUSE_LINEAR:
	  {
	    /* For linear clauses the decl operand is really the argument
	       index, encoded as an integer constant.  */
	    tree decl = OMP_CLAUSE_DECL (t);
	    tree step = OMP_CLAUSE_LINEAR_STEP (t);
	    int argno = TREE_INT_CST_LOW (decl);
	    if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
	      {
		/* Variable stride: linear_step stores the index of the
		   uniform argument that supplies the step at run time,
		   not the step value itself.  */
		enum cgraph_simd_clone_arg_type arg_type;
		if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
		  switch (OMP_CLAUSE_LINEAR_KIND (t))
		    {
		    case OMP_CLAUSE_LINEAR_REF:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP;
		      break;
		    case OMP_CLAUSE_LINEAR_UVAL:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP;
		      break;
		    case OMP_CLAUSE_LINEAR_VAL:
		    case OMP_CLAUSE_LINEAR_DEFAULT:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP;
		      break;
		    default:
		      gcc_unreachable ();
		    }
		else
		  arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
		clone_info->args[argno].arg_type = arg_type;
		clone_info->args[argno].linear_step = tree_to_shwi (step);
		/* The referenced step argument index must be in range.  */
		gcc_assert (clone_info->args[argno].linear_step >= 0
			    && clone_info->args[argno].linear_step < n);
	      }
	    else
	      {
		if (POINTER_TYPE_P (args[argno]))
		  step = fold_convert (ssizetype, step);
		if (!tree_fits_shwi_p (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring large linear step");
		    args.release ();
		    return NULL;
		  }
		else if (integer_zerop (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring zero linear step");
		    args.release ();
		    return NULL;
		  }
		else
		  {
		    /* Constant stride: record the step value directly.  */
		    enum cgraph_simd_clone_arg_type arg_type;
		    if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
		      switch (OMP_CLAUSE_LINEAR_KIND (t))
			{
			case OMP_CLAUSE_LINEAR_REF:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP;
			  break;
			case OMP_CLAUSE_LINEAR_UVAL:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP;
			  break;
			case OMP_CLAUSE_LINEAR_VAL:
			case OMP_CLAUSE_LINEAR_DEFAULT:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP;
			  break;
			default:
			  gcc_unreachable ();
			}
		    else
		      arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
		    clone_info->args[argno].arg_type = arg_type;
		    clone_info->args[argno].linear_step = tree_to_shwi (step);
		  }
	      }
	    break;
	  }
	case OMP_CLAUSE_UNIFORM:
	  {
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].arg_type
	      = SIMD_CLONE_ARG_TYPE_UNIFORM;
	    break;
	  }
	case OMP_CLAUSE_ALIGNED:
	  {
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].alignment
	      = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
	    break;
	  }
	default:
	  break;
	}
    }

 out:
  /* An _Atomic qualified return type or non-uniform argument cannot be
     vectorized; refuse to create the clone.  */
  if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (node->decl))))
    {
      warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
		  "ignoring %<#pragma omp declare simd%> on function "
		  "with %<_Atomic%> qualified return type");
      args.release ();
      return NULL;
    }

  for (unsigned int argno = 0; argno < clone_info->nargs; argno++)
    if (TYPE_ATOMIC (args[argno])
	&& clone_info->args[argno].arg_type != SIMD_CLONE_ARG_TYPE_UNIFORM)
      {
	warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
		    "ignoring %<#pragma omp declare simd%> on function "
		    "with %<_Atomic%> qualified non-%<uniform%> argument");
	args.release ();
	return NULL;
      }

  args.release ();
  return clone_info;
}
/* Given a SIMD clone in NODE, calculate the characteristic data
type and return the coresponding type. The characteristic data
type is computed as described in the Intel Vector ABI. */
static tree
simd_clone_compute_base_data_type (struct cgraph_node *node,
				   struct cgraph_simd_clone *clone_info)
{
  tree type = integer_type_node;
  tree fndecl = node->decl;

  /* a) For non-void function, the characteristic data type is the
     return type.  */
  if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
    type = TREE_TYPE (TREE_TYPE (fndecl));

  /* b) If the function has any non-uniform, non-linear parameters,
     then the characteristic data type is the type of the first
     such parameter.  */
  else
    {
      vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
      for (unsigned int i = 0; i < clone_info->nargs; ++i)
	if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
	  {
	    type = map[i];
	    break;
	  }
      map.release ();
    }

  /* c) If the characteristic data type determined by a) or b) above
     is struct, union, or class type which is pass-by-value (except
     for the type that maps to the built-in complex data type), the
     characteristic data type is int.
     NOTE(review): the COMPLEX_TYPE test looks redundant here, since a
     type satisfying RECORD_OR_UNION_TYPE_P cannot have that tree code;
     it appears kept to mirror the ABI wording — confirm upstream.  */
  if (RECORD_OR_UNION_TYPE_P (type)
      && !aggregate_value_p (type, NULL)
      && TREE_CODE (type) != COMPLEX_TYPE)
    return integer_type_node;

  /* d) If none of the above three classes is applicable, the
     characteristic data type is int.  */
  return type;

  /* e) For Intel Xeon Phi native and offload compilation, if the
     resulting characteristic data type is 8-bit or 16-bit integer
     data type, the characteristic data type is int.  */
  /* Well, we don't handle Xeon Phi yet.  */
}
/* Build the Intel Vector Function ABI mangled name for the SIMD clone
   of NODE described by CLONE_INFO:
     "_ZGV" <isa-letter> <'M'|'N'> <simdlen> <per-argument codes> "_" <name>
   where 'M'/'N' encodes in-branch (masked) vs. not-in-branch.  Return
   the identifier, or NULL_TREE if NODE already has a clone with this
   exact mangled name.  */
static tree
simd_clone_mangle (struct cgraph_node *node,
		   struct cgraph_simd_clone *clone_info)
{
  char vecsize_mangle = clone_info->vecsize_mangle;
  char mask = clone_info->inbranch ? 'M' : 'N';
  unsigned int simdlen = clone_info->simdlen;
  unsigned int n;
  pretty_printer pp;

  gcc_assert (vecsize_mangle && simdlen);

  pp_string (&pp, "_ZGV");
  pp_character (&pp, vecsize_mangle);
  pp_character (&pp, mask);
  pp_decimal_int (&pp, simdlen);

  /* One code per argument: 'u' uniform, 'v' vector, 'l'/'R'/'L'/'U'
     for the linear kinds (with constant step appended), or the "?s"
     forms for variable step (step is another argument's index).  */
  for (n = 0; n < clone_info->nargs; ++n)
    {
      struct cgraph_simd_clone_arg arg = clone_info->args[n];

      switch (arg.arg_type)
	{
	case SIMD_CLONE_ARG_TYPE_UNIFORM:
	  pp_character (&pp, 'u');
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	  pp_character (&pp, 'l');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
	  pp_character (&pp, 'R');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	  pp_character (&pp, 'L');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	  pp_character (&pp, 'U');
	  goto mangle_linear;
	mangle_linear:
	  gcc_assert (arg.linear_step != 0);
	  /* A step of 1 is implicit; negative steps are mangled as 'n'
	     followed by the magnitude.  */
	  if (arg.linear_step > 1)
	    pp_unsigned_wide_integer (&pp, arg.linear_step);
	  else if (arg.linear_step < 0)
	    {
	      pp_character (&pp, 'n');
	      pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
					      arg.linear_step));
	    }
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	  pp_string (&pp, "ls");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	  pp_string (&pp, "Rs");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	  pp_string (&pp, "Ls");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	  pp_string (&pp, "Us");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	default:
	  pp_character (&pp, 'v');
	}
      if (arg.alignment)
	{
	  pp_character (&pp, 'a');
	  pp_decimal_int (&pp, arg.alignment);
	}
    }

  pp_underscore (&pp);
  const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
  /* Skip the '*' prefix some targets prepend to assembler names.  */
  if (*str == '*')
    ++str;
  pp_string (&pp, str);
  str = pp_formatted_text (&pp);

  /* If there already is a SIMD clone with the same mangled name, don't
     add another one.  This can happen e.g. for
     #pragma omp declare simd
     #pragma omp declare simd simdlen(8)
     int foo (int, int);
     if the simdlen is assumed to be 8 for the first one, etc.  */
  for (struct cgraph_node *clone = node->simd_clones; clone;
       clone = clone->simdclone->next_clone)
    if (id_equal (DECL_ASSEMBLER_NAME (clone->decl), str))
      return NULL_TREE;

  return get_identifier (str);
}
/* Create a simd clone of OLD_NODE and return it. */
static struct cgraph_node *
simd_clone_create (struct cgraph_node *old_node)
{
  struct cgraph_node *new_node;
  if (old_node->definition)
    {
      /* A defined function: clone the body as well.  */
      if (!old_node->has_gimple_body_p ())
	return NULL;
      old_node->get_body ();
      new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
							   false, NULL, NULL,
							   "simdclone");
    }
  else
    {
      /* Declaration only: copy the decl and register a bodyless clone.  */
      tree old_decl = old_node->decl;
      tree new_decl = copy_node (old_node->decl);
      DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
      SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
      SET_DECL_RTL (new_decl, NULL);
      DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
      DECL_STATIC_DESTRUCTOR (new_decl) = 0;
      new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
      if (old_node->in_other_partition)
	new_node->in_other_partition = 1;
    }
  if (new_node == NULL)
    return new_node;

  /* The clone is an ordinary function even if the original was a
     builtin.  */
  DECL_BUILT_IN_CLASS (new_node->decl) = NOT_BUILT_IN;
  DECL_FUNCTION_CODE (new_node->decl) = (enum built_in_function) 0;

  /* Mirror linkage and visibility flags from the original decl.  */
  TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
  DECL_COMDAT (new_node->decl) = DECL_COMDAT (old_node->decl);
  DECL_WEAK (new_node->decl) = DECL_WEAK (old_node->decl);
  DECL_EXTERNAL (new_node->decl) = DECL_EXTERNAL (old_node->decl);
  DECL_VISIBILITY_SPECIFIED (new_node->decl)
    = DECL_VISIBILITY_SPECIFIED (old_node->decl);
  DECL_VISIBILITY (new_node->decl) = DECL_VISIBILITY (old_node->decl);
  DECL_DLLIMPORT_P (new_node->decl) = DECL_DLLIMPORT_P (old_node->decl);
  if (DECL_ONE_ONLY (old_node->decl))
    make_decl_one_only (new_node->decl, DECL_ASSEMBLER_NAME (new_node->decl));

  /* The method cgraph_version_clone_with_body () will force the new
     symbol local.  Undo this, and inherit external visibility from
     the old node.  */
  new_node->local.local = old_node->local.local;
  new_node->externally_visible = old_node->externally_visible;

  return new_node;
}
/* Adjust the return type of the given function to its appropriate
vector counterpart. Returns a simd array to be used throughout the
function as a return value. */
static tree
simd_clone_adjust_return_type (struct cgraph_node *node)
{
  tree fndecl = node->decl;
  tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
  unsigned int veclen;
  tree t;

  /* Adjust the function return type.  */
  if (orig_rettype == void_type_node)
    return NULL_TREE;
  TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
  t = TREE_TYPE (TREE_TYPE (fndecl));
  if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
    veclen = node->simdclone->vecsize_int;
  else
    veclen = node->simdclone->vecsize_float;
  /* Number of elements of this type that fit one hardware vector,
     capped at the clone's simdlen.  */
  veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (t));
  if (veclen > node->simdclone->simdlen)
    veclen = node->simdclone->simdlen;
  /* Pointers are vectorized as pointer-sized integers.  */
  if (POINTER_TYPE_P (t))
    t = pointer_sized_int_node;
  if (veclen == node->simdclone->simdlen)
    t = build_vector_type (t, node->simdclone->simdlen);
  else
    {
      /* simdlen spans several hardware vectors: return an array of
	 vectors.  */
      t = build_vector_type (t, veclen);
      t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
    }
  TREE_TYPE (TREE_TYPE (fndecl)) = t;
  if (!node->definition)
    return NULL_TREE;

  t = DECL_RESULT (fndecl);
  /* Adjust the DECL_RESULT.  */
  gcc_assert (TREE_TYPE (t) != void_type_node);
  TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
  relayout_decl (t);

  tree atype = build_array_type_nelts (orig_rettype,
				       node->simdclone->simdlen);
  if (veclen != node->simdclone->simdlen)
    /* In the multi-vector case, view the result decl itself as the
       scalar return array.  */
    return build1 (VIEW_CONVERT_EXPR, atype, t);

  /* Set up a SIMD array to use as the return value.  */
  tree retval = create_tmp_var_raw (atype, "retval");
  gimple_add_tmp_var (retval);
  return retval;
}
/* Each vector argument has a corresponding array to be used locally
as part of the eventual loop. Create such temporary array and
return it.
PREFIX is the prefix to be used for the temporary.
TYPE is the inner element type.
SIMDLEN is the number of elements. */
static tree
create_tmp_simd_array (const char *prefix, tree type, int simdlen)
{
  /* Build a TYPE[SIMDLEN] temporary named after PREFIX and register it
     with the current function.  */
  tree arr_type = build_array_type_nelts (type, simdlen);
  tree arr_var = create_tmp_var_raw (arr_type, prefix);
  gimple_add_tmp_var (arr_var);
  return arr_var;
}
/* Modify the function argument types to their corresponding vector
counterparts if appropriate. Also, create one array for each simd
argument to be used locally when using the function arguments as
part of the loop.
NODE is the function whose arguments are to be adjusted.
Returns an adjustment vector that will be filled describing how the
argument types will be adjusted. */
static ipa_parm_adjustment_vec
simd_clone_adjust_argument_types (struct cgraph_node *node)
{
  vec<tree> args;
  ipa_parm_adjustment_vec adjustments;

  /* For a definition we have PARM_DECLs; for a mere declaration only
     the parameter types.  */
  if (node->definition)
    args = ipa_get_vector_of_formal_parms (node->decl);
  else
    args = simd_clone_vector_of_formal_parm_types (node->decl);
  adjustments.create (args.length ());
  unsigned i, j, veclen;
  struct ipa_parm_adjustment adj;
  struct cgraph_simd_clone *sc = node->simdclone;

  for (i = 0; i < sc->nargs; ++i)
    {
      memset (&adj, 0, sizeof (adj));
      tree parm = args[i];
      tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
      adj.base_index = i;
      adj.base = parm;

      sc->args[i].orig_arg = node->definition ? parm : NULL_TREE;
      sc->args[i].orig_type = parm_type;

      switch (sc->args[i].arg_type)
	{
	default:
	  /* No adjustment necessary for scalar arguments.  */
	  adj.op = IPA_PARM_OP_COPY;
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	  /* Linear uval: the pointer is passed unchanged, but the
	     pointed-to values get a local simd array.  */
	  if (node->definition)
	    sc->args[i].simd_array
	      = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
				       TREE_TYPE (parm_type),
				       sc->simdlen);
	  adj.op = IPA_PARM_OP_COPY;
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	case SIMD_CLONE_ARG_TYPE_VECTOR:
	  if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
	    veclen = sc->vecsize_int;
	  else
	    veclen = sc->vecsize_float;
	  /* Elements per hardware vector, capped at simdlen.  */
	  veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (parm_type));
	  if (veclen > sc->simdlen)
	    veclen = sc->simdlen;
	  adj.arg_prefix = "simd";
	  if (POINTER_TYPE_P (parm_type))
	    adj.type = build_vector_type (pointer_sized_int_node, veclen);
	  else
	    adj.type = build_vector_type (parm_type, veclen);
	  sc->args[i].vector_type = adj.type;
	  /* If simdlen needs several hardware vectors, push one extra
	     (NEW) adjustment per additional vector argument.  */
	  for (j = veclen; j < sc->simdlen; j += veclen)
	    {
	      adjustments.safe_push (adj);
	      if (j == veclen)
		{
		  memset (&adj, 0, sizeof (adj));
		  adj.op = IPA_PARM_OP_NEW;
		  adj.arg_prefix = "simd";
		  adj.base_index = i;
		  adj.type = sc->args[i].vector_type;
		}
	    }

	  if (node->definition)
	    sc->args[i].simd_array
	      = create_tmp_simd_array (DECL_NAME (parm)
				       ? IDENTIFIER_POINTER (DECL_NAME (parm))
				       : NULL, parm_type, sc->simdlen);
	}
      adjustments.safe_push (adj);
    }

  if (sc->inbranch)
    {
      /* Append the mask argument for in-branch clones.  */
      tree base_type = simd_clone_compute_base_data_type (sc->origin, sc);
      memset (&adj, 0, sizeof (adj));
      adj.op = IPA_PARM_OP_NEW;
      adj.arg_prefix = "mask";
      adj.base_index = i;
      if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
	veclen = sc->vecsize_int;
      else
	veclen = sc->vecsize_float;
      veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (base_type));
      if (veclen > sc->simdlen)
	veclen = sc->simdlen;
      /* Targets with integer mask modes (e.g. AVX512) pass the mask as
	 an integer rather than a vector.  */
      if (sc->mask_mode != VOIDmode)
	adj.type
	  = lang_hooks.types.type_for_mode (sc->mask_mode, 1);
      else if (POINTER_TYPE_P (base_type))
	adj.type = build_vector_type (pointer_sized_int_node, veclen);
      else
	adj.type = build_vector_type (base_type, veclen);
      adjustments.safe_push (adj);
      for (j = veclen; j < sc->simdlen; j += veclen)
	adjustments.safe_push (adj);

      /* We have previously allocated one extra entry for the mask.  Use
	 it and fill it.  */
      sc->nargs++;
      if (sc->mask_mode != VOIDmode)
	base_type = boolean_type_node;
      if (node->definition)
	{
	  sc->args[i].orig_arg
	    = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
	  if (sc->mask_mode == VOIDmode)
	    sc->args[i].simd_array
	      = create_tmp_simd_array ("mask", base_type, sc->simdlen);
	  else if (veclen < sc->simdlen)
	    sc->args[i].simd_array
	      = create_tmp_simd_array ("mask", adj.type, sc->simdlen / veclen);
	  else
	    sc->args[i].simd_array = NULL_TREE;
	}
      sc->args[i].orig_type = base_type;
      sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
    }

  if (node->definition)
    ipa_modify_formal_parameters (node->decl, adjustments);
  else
    {
      /* No body: rewrite TYPE_ARG_TYPES directly from the adjustment
	 list.  */
      tree new_arg_types = NULL_TREE, new_reversed;
      bool last_parm_void = false;
      if (args.length () > 0 && args.last () == void_type_node)
	last_parm_void = true;

      gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
      j = adjustments.length ();
      for (i = 0; i < j; i++)
	{
	  struct ipa_parm_adjustment *adj = &adjustments[i];
	  tree ptype;
	  if (adj->op == IPA_PARM_OP_COPY)
	    ptype = args[adj->base_index];
	  else
	    ptype = adj->type;
	  new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
	}
      new_reversed = nreverse (new_arg_types);
      if (last_parm_void)
	{
	  if (new_reversed)
	    TREE_CHAIN (new_arg_types) = void_list_node;
	  else
	    new_reversed = void_list_node;
	}
      tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
      TYPE_ARG_TYPES (new_type) = new_reversed;
      TREE_TYPE (node->decl) = new_type;

      /* NOTE(review): the vector is released yet still returned below;
	 callers appear to use the result only for definitions — confirm
	 before relying on the declaration path's return value.  */
      adjustments.release ();
    }
  args.release ();
  return adjustments;
}
/* Initialize and copy the function arguments in NODE to their
corresponding local simd arrays. Returns a fresh gimple_seq with
the instruction sequence generated. */
static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
			     ipa_parm_adjustment_vec adjustments)
{
  gimple_seq seq = NULL;
  unsigned i = 0, j = 0, k;

  /* I indexes the clone's logical arguments, J the adjustment /
     PARM_DECL list; a logical argument split over several hardware
     vectors advances ARG and J more than once per I.  */
  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      /* Copied scalars and pointer arguments need no local array.  */
      if (adjustments[j].op == IPA_PARM_OP_COPY
	  || POINTER_TYPE_P (TREE_TYPE (arg)))
	continue;

      node->simdclone->args[i].vector_arg = arg;
      tree array = node->simdclone->args[i].simd_array;
      if (node->simdclone->mask_mode != VOIDmode
	  && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK)
	{
	  /* Integer-mode masks: store each mask argument into its slot
	     of the mask array.  */
	  if (array == NULL_TREE)
	    continue;
	  unsigned int l
	    = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (array))));
	  for (k = 0; k <= l; k++)
	    {
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      tree t = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (array)),
			       array, size_int (k), NULL, NULL);
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	  continue;
	}
      if (simd_clone_subparts (TREE_TYPE (arg)) == node->simdclone->simdlen)
	{
	  /* One vector argument covers the whole simdlen: store it over
	     the array with a single aggregate move.  */
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  tree ptr = build_fold_addr_expr (array);
	  tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			   build_int_cst (ptype, 0));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	  gimplify_and_add (t, &seq);
	}
      else
	{
	  /* Several vector arguments make up the simd array: store each
	     vector at its byte offset.  */
	  unsigned int simdlen = simd_clone_subparts (TREE_TYPE (arg));
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  for (k = 0; k < node->simdclone->simdlen; k += simdlen)
	    {
	      tree ptr = build_fold_addr_expr (array);
	      int elemsize;
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      tree elemtype = TREE_TYPE (TREE_TYPE (arg));
	      elemsize = GET_MODE_SIZE (SCALAR_TYPE_MODE (elemtype));
	      tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			       build_int_cst (ptype, k * elemsize));
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	}
    }
  return seq;
}
/* Callback info for ipa_simd_modify_stmt_ops below. */
struct modify_stmt_info {
  /* Parameter replacements being applied to the function body.  */
  ipa_parm_adjustment_vec adjustments;
  /* Statement currently being rewritten; new statements are inserted
     before it.  */
  gimple *stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};
/* Callback for walk_gimple_op.
Adjust operands from a given statement as specified in the
adjustments vector in the callback data. */
static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  /* Look through ADDR_EXPR so that &parm is handled as well.  */
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  struct ipa_parm_adjustment *cand = NULL;
  if (TREE_CODE (*tp) == PARM_DECL)
    cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
  else
    {
      if (TYPE_P (*tp))
	*walk_subtrees = 0;
    }

  tree repl = NULL_TREE;
  if (cand)
    repl = unshare_expr (cand->new_decl);
  else
    {
      if (tp != orig_tp)
	{
	  /* We looked through an ADDR_EXPR but its operand is not
	     itself a PARM_DECL; recurse into it and see whether
	     anything inside got replaced.  */
	  *walk_subtrees = 0;
	  bool modified = info->modified;
	  info->modified = false;
	  walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
	  if (!info->modified)
	    {
	      info->modified = modified;
	      return NULL_TREE;
	    }
	  info->modified = modified;
	  repl = *tp;
	}
      else
	return NULL_TREE;
    }

  if (tp != orig_tp)
    {
      /* The use was under an ADDR_EXPR: materialize the address of the
	 replacement in a statement inserted before the current one.  */
      repl = build_fold_addr_expr (repl);
      gimple *stmt;
      if (is_gimple_debug (info->stmt))
	{
	  /* For debug statements bind through an artificial
	     DEBUG_EXPR_DECL instead of a real SSA name.  */
	  tree vexpr = make_node (DEBUG_EXPR_DECL);
	  stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
	  DECL_ARTIFICIAL (vexpr) = 1;
	  TREE_TYPE (vexpr) = TREE_TYPE (repl);
	  SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (repl)));
	  repl = vexpr;
	}
      else
	{
	  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
	  repl = gimple_assign_lhs (stmt);
	}
      gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      /* Type mismatch between the use and the simd-array element
	 (e.g. pointer parm vs. pointer-sized integer): view-convert.  */
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;
  info->modified = true;
  return NULL_TREE;
}
/* Traverse the function body and perform all modifications as
described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
modified such that the replacement/reduction value will now be an
offset into the corresponding simd_array.
This function will replace all function argument uses with their
corresponding simd array elements, and ajust the return values
accordingly. */
static void
ipa_simd_modify_function_body (struct cgraph_node *node,
			       ipa_parm_adjustment_vec adjustments,
			       tree retval_array, tree iter)
{
  basic_block bb;
  unsigned int i, j, l;

  /* Re-use the adjustments array, but this time use it to replace
     every function argument use to an offset into the corresponding
     simd_array.  */
  for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
    {
      if (!node->simdclone->args[i].vector_arg)
	continue;

      tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
      adjustments[j].new_decl
	= build4 (ARRAY_REF,
		  basetype,
		  node->simdclone->args[i].simd_array,
		  iter,
		  NULL_TREE, NULL_TREE);
      /* An argument spread over several hardware vectors occupies
	 several adjustment slots; skip the extra ones.  */
      if (adjustments[j].op == IPA_PARM_OP_NONE
	  && simd_clone_subparts (vectype) < node->simdclone->simdlen)
	j += node->simdclone->simdlen / simd_clone_subparts (vectype) - 1;
    }

  l = adjustments.length ();
  tree name;

  /* Move SSA names based on replaced PARM_DECLs onto fresh VAR_DECL
     bases; former default defs become explicit loads from the simd
     array at function entry.  */
  FOR_EACH_SSA_NAME (i, name, cfun)
    {
      if (SSA_NAME_VAR (name)
	  && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
	{
	  for (j = 0; j < l; j++)
	    if (SSA_NAME_VAR (name) == adjustments[j].base
		&& adjustments[j].new_decl)
	      {
		tree base_var;
		if (adjustments[j].new_ssa_base == NULL_TREE)
		  {
		    base_var
		      = copy_var_decl (adjustments[j].base,
				       DECL_NAME (adjustments[j].base),
				       TREE_TYPE (adjustments[j].base));
		    adjustments[j].new_ssa_base = base_var;
		  }
		else
		  base_var = adjustments[j].new_ssa_base;
		if (SSA_NAME_IS_DEFAULT_DEF (name))
		  {
		    bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
		    gimple_stmt_iterator gsi = gsi_after_labels (bb);
		    tree new_decl = unshare_expr (adjustments[j].new_decl);
		    set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
		    SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
		    SSA_NAME_IS_DEFAULT_DEF (name) = 0;
		    gimple *stmt = gimple_build_assign (name, new_decl);
		    gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		  }
		else
		  SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	      }
	}
    }

  struct modify_stmt_info info;
  info.adjustments = adjustments;

  /* Walk every statement, replacing parameter uses and rewriting
     returns into stores to RETVAL_ARRAY[ITER].  */
  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
    {
      gimple_stmt_iterator gsi;

      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  info.stmt = stmt;
	  struct walk_stmt_info wi;

	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);

	  if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
	    {
	      tree retval = gimple_return_retval (return_stmt);
	      if (!retval)
		{
		  gsi_remove (&gsi, true);
		  continue;
		}

	      /* Replace `return foo' with `retval_array[iter] = foo'.  */
	      tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
				 retval_array, iter, NULL, NULL);
	      stmt = gimple_build_assign (ref, retval);
	      gsi_replace (&gsi, stmt, true);
	      info.modified = true;
	    }

	  if (info.modified)
	    {
	      update_stmt (stmt);
	      /* A rewritten statement may no longer throw; clean up any
		 now-dead EH edges.  */
	      if (maybe_clean_eh_stmt (stmt))
		gimple_purge_dead_eh_edges (gimple_bb (stmt));
	    }
	  gsi_next (&gsi);
	}
    }
}
/* Helper function of simd_clone_adjust, return linear step addend
of Ith argument. */
static tree
simd_clone_linear_addend (struct cgraph_node *node, unsigned int i,
			  tree addtype, basic_block entry_bb)
{
  tree ptype = NULL_TREE;
  switch (node->simdclone->args[i].arg_type)
    {
    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
      /* Constant step: the addend is the step value itself.  */
      return build_int_cst (addtype, node->simdclone->args[i].linear_step);
    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
      ptype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      break;
    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
      ptype = TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg));
      break;
    default:
      gcc_unreachable ();
    }

  /* Variable step: linear_step holds the index of the uniform argument
     that supplies the step at run time.  */
  unsigned int idx = node->simdclone->args[i].linear_step;
  tree arg = node->simdclone->args[idx].orig_arg;
  gcc_assert (is_gimple_reg_type (TREE_TYPE (arg)));
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  gimple *g;
  tree ret;
  if (is_gimple_reg (arg))
    ret = get_or_create_ssa_default_def (cfun, arg);
  else
    {
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (arg)), arg);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      ret = gimple_assign_lhs (g);
    }
  /* A step passed by reference must be dereferenced first.  */
  if (TREE_CODE (TREE_TYPE (arg)) == REFERENCE_TYPE)
    {
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (TREE_TYPE (arg))),
			       build_simple_mem_ref (ret));
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      ret = gimple_assign_lhs (g);
    }
  if (!useless_type_conversion_p (addtype, TREE_TYPE (ret)))
    {
      g = gimple_build_assign (make_ssa_name (addtype), NOP_EXPR, ret);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      ret = gimple_assign_lhs (g);
    }
  /* Pointer arguments advance in bytes: scale the step by the size of
     the pointed-to type when it is a compile-time constant.  */
  if (POINTER_TYPE_P (ptype))
    {
      tree size = TYPE_SIZE_UNIT (TREE_TYPE (ptype));
      if (size && TREE_CODE (size) == INTEGER_CST)
	{
	  g = gimple_build_assign (make_ssa_name (addtype), MULT_EXPR,
				   ret, fold_convert (addtype, size));
	  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	  ret = gimple_assign_lhs (g);
	}
    }
  return ret;
}
/* Adjust the argument types in NODE to their appropriate vector
   counterparts.  For definitions this also rewrites the function body
   into a simdlen-iteration loop over the per-lane SIMD arrays, inserts
   the mask test for inbranch clones, materializes linear/uniform/aligned
   argument handling, and annotates the loop for the vectorizer. */
static void
simd_clone_adjust (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  /* Let the target tweak the clone (e.g. add target attributes).  */
  targetm.simd_clone.adjust (node);
  tree retval = simd_clone_adjust_return_type (node);
  ipa_parm_adjustment_vec adjustments
    = simd_clone_adjust_argument_types (node);
  push_gimplify_context ();
  gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
  /* Adjust all uses of vector arguments accordingly.  Adjust all
     return values accordingly.  "iter" is the lane index 0..simdlen-1;
     iter1 is its value on loop entry, iter2 the incremented value.  */
  tree iter = create_tmp_var (unsigned_type_node, "iter");
  tree iter1 = make_ssa_name (iter);
  tree iter2 = NULL_TREE;
  ipa_simd_modify_function_body (node, adjustments, retval, iter1);
  adjustments.release ();
  /* Initialize the iteration variable.  body_bb becomes the loop header. */
  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  basic_block body_bb = split_block_after_labels (entry_bb)->dest;
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  /* Insert the SIMD array and iv initialization at function
     entry. */
  gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
  pop_gimplify_context (NULL);
  gimple *g;
  basic_block incr_bb = NULL;
  struct loop *loop = NULL;
  /* Create a new BB right before the original exit BB, to hold the
     iteration increment and the condition/branch. */
  if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
    {
      basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
      incr_bb = create_empty_bb (orig_exit);
      incr_bb->count = profile_count::zero ();
      add_bb_to_loop (incr_bb, body_bb->loop_father);
      /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with an empty
         flag.  Set it now to be a FALLTHRU_EDGE. */
      gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
      EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
      /* Route every former exit predecessor through the increment block.  */
      for (unsigned i = 0;
           i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
        {
          edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
          redirect_edge_succ (e, incr_bb);
          incr_bb->count += e->count ();
        }
    }
  else if (node->simdclone->inbranch)
    {
      /* No exit predecessors (e.g. a noreturn body), but an inbranch
         clone still needs the increment block as mask-skip target.  */
      incr_bb = create_empty_bb (entry_bb);
      incr_bb->count = profile_count::zero ();
      add_bb_to_loop (incr_bb, body_bb->loop_father);
    }
  if (incr_bb)
    {
      make_single_succ_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
      gsi = gsi_last_bb (incr_bb);
      iter2 = make_ssa_name (iter);
      /* iter2 = iter1 + 1 */
      g = gimple_build_assign (iter2, PLUS_EXPR, iter1,
                               build_int_cst (unsigned_type_node, 1));
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      /* Mostly annotate the loop for the vectorizer (the rest is done
         below). */
      loop = alloc_loop ();
      cfun->has_force_vectorize_loops = true;
      loop->safelen = node->simdclone->simdlen;
      loop->force_vectorize = true;
      loop->header = body_bb;
    }
  /* Branch around the body if the mask applies. */
  if (node->simdclone->inbranch)
    {
      gsi = gsi_last_bb (loop->header);
      tree mask_array
        = node->simdclone->args[node->simdclone->nargs - 1].simd_array;
      tree mask;
      if (node->simdclone->mask_mode != VOIDmode)
        {
          /* Integer bitmask: extract this lane's bit.  */
          tree shift_cnt;
          if (mask_array == NULL_TREE)
            {
              /* A single mask word covers all lanes; shift by the lane
                 index directly.  */
              tree arg = node->simdclone->args[node->simdclone->nargs
                                               - 1].vector_arg;
              mask = get_or_create_ssa_default_def (cfun, arg);
              shift_cnt = iter1;
            }
          else
            {
              /* Mask split over an array of c-lane words: word index is
                 iter1 >> log2(lanes-per-word), bit index is the low bits.  */
              tree maskt = TREE_TYPE (mask_array);
              int c = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (maskt)));
              c = node->simdclone->simdlen / (c + 1);
              int s = exact_log2 (c);
              gcc_assert (s > 0);
              c--;
              tree idx = make_ssa_name (TREE_TYPE (iter1));
              g = gimple_build_assign (idx, RSHIFT_EXPR, iter1,
                                       build_int_cst (NULL_TREE, s));
              gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
              mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
              tree aref = build4 (ARRAY_REF,
                                  TREE_TYPE (TREE_TYPE (mask_array)),
                                  mask_array, idx, NULL, NULL);
              g = gimple_build_assign (mask, aref);
              gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
              shift_cnt = make_ssa_name (TREE_TYPE (iter1));
              g = gimple_build_assign (shift_cnt, BIT_AND_EXPR, iter1,
                                       build_int_cst (TREE_TYPE (iter1), c));
              gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
            }
          /* mask = (mask >> shift_cnt) & 1 */
          g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
                                   RSHIFT_EXPR, mask, shift_cnt);
          gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
          mask = gimple_assign_lhs (g);
          g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
                                   BIT_AND_EXPR, mask,
                                   build_int_cst (TREE_TYPE (mask), 1));
          gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
          mask = gimple_assign_lhs (g);
        }
      else
        {
          /* One mask element per lane, stored in the SIMD array.  */
          mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
          tree aref = build4 (ARRAY_REF,
                              TREE_TYPE (TREE_TYPE (mask_array)),
                              mask_array, iter1, NULL, NULL);
          g = gimple_build_assign (mask, aref);
          gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
          int bitsize = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (aref)));
          if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
            {
              /* Non-integral mask elements are reinterpreted as an
                 integer of the same width for the zero comparison.  */
              aref = build1 (VIEW_CONVERT_EXPR,
                             build_nonstandard_integer_type (bitsize, 0),
                             mask);
              mask = make_ssa_name (TREE_TYPE (aref));
              g = gimple_build_assign (mask, aref);
              gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
            }
        }
      /* If the lane's mask bit is zero, skip straight to the increment.  */
      g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
                             NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      edge e = make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
      e->probability = profile_probability::unlikely ().guessed ();
      incr_bb->count += e->count ();
      edge fallthru = FALLTHRU_EDGE (loop->header);
      fallthru->flags = EDGE_FALSE_VALUE;
      fallthru->probability = profile_probability::likely ().guessed ();
    }
  basic_block latch_bb = NULL;
  basic_block new_exit_bb = NULL;
  /* Generate the condition. */
  if (incr_bb)
    {
      /* if (iter2 < simdlen) goto latch (back to body); else exit.  */
      gsi = gsi_last_bb (incr_bb);
      g = gimple_build_cond (LT_EXPR, iter2,
                             build_int_cst (unsigned_type_node,
                                            node->simdclone->simdlen),
                             NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      edge e = split_block (incr_bb, gsi_stmt (gsi));
      latch_bb = e->dest;
      new_exit_bb = split_block_after_labels (latch_bb)->dest;
      loop->latch = latch_bb;
      redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
      edge new_e = make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
      /* FIXME: Do we need to distribute probabilities for the conditional? */
      new_e->probability = profile_probability::guessed_never ();
      /* The successor of incr_bb is already pointing to latch_bb; just
         change the flags.
         make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
      FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
    }
  /* PHI for the lane index: 0 on entry, iter2 from the latch.  */
  gphi *phi = create_phi_node (iter1, body_bb);
  edge preheader_edge = find_edge (entry_bb, body_bb);
  edge latch_edge = NULL;
  add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
               UNKNOWN_LOCATION);
  if (incr_bb)
    {
      latch_edge = single_succ_edge (latch_bb);
      add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
      /* Generate the new return. */
      gsi = gsi_last_bb (new_exit_bb);
      if (retval
          && TREE_CODE (retval) == VIEW_CONVERT_EXPR
          && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
        retval = TREE_OPERAND (retval, 0);
      else if (retval)
        {
          retval = build1 (VIEW_CONVERT_EXPR,
                           TREE_TYPE (TREE_TYPE (node->decl)),
                           retval);
          retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
                                             false, GSI_CONTINUE_LINKING);
        }
      g = gimple_build_return (retval);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
    }
  /* Handle aligned clauses by replacing default defs of the aligned
     uniform args with __builtin_assume_aligned (arg_N(D), alignment)
     lhs.  Handle linear by adding PHIs. */
  for (unsigned i = 0; i < node->simdclone->nargs; i++)
    if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
        && (TREE_ADDRESSABLE (node->simdclone->args[i].orig_arg)
            || !is_gimple_reg_type
                  (TREE_TYPE (node->simdclone->args[i].orig_arg))))
      {
        /* Addressable/aggregate uniform arg: snapshot it once at entry
           and restore the snapshot at the top of each iteration, so body
           stores can't leak between lanes.  */
        tree orig_arg = node->simdclone->args[i].orig_arg;
        if (is_gimple_reg_type (TREE_TYPE (orig_arg)))
          iter1 = make_ssa_name (TREE_TYPE (orig_arg));
        else
          {
            iter1 = create_tmp_var_raw (TREE_TYPE (orig_arg));
            gimple_add_tmp_var (iter1);
          }
        gsi = gsi_after_labels (entry_bb);
        g = gimple_build_assign (iter1, orig_arg);
        gsi_insert_before (&gsi, g, GSI_NEW_STMT);
        gsi = gsi_after_labels (body_bb);
        g = gimple_build_assign (orig_arg, iter1);
        gsi_insert_before (&gsi, g, GSI_NEW_STMT);
      }
    else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
             && DECL_BY_REFERENCE (node->simdclone->args[i].orig_arg)
             && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
                == REFERENCE_TYPE
             && TREE_ADDRESSABLE
                  (TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg))))
      {
        /* Uniform arg passed by reference to an addressable object:
           same snapshot/restore dance through the dereferenced value.  */
        tree orig_arg = node->simdclone->args[i].orig_arg;
        tree def = ssa_default_def (cfun, orig_arg);
        if (def && !has_zero_uses (def))
          {
            iter1 = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (orig_arg)));
            gimple_add_tmp_var (iter1);
            gsi = gsi_after_labels (entry_bb);
            g = gimple_build_assign (iter1, build_simple_mem_ref (def));
            gsi_insert_before (&gsi, g, GSI_NEW_STMT);
            gsi = gsi_after_labels (body_bb);
            g = gimple_build_assign (build_simple_mem_ref (def), iter1);
            gsi_insert_before (&gsi, g, GSI_NEW_STMT);
          }
      }
    else if (node->simdclone->args[i].alignment
             && node->simdclone->args[i].arg_type
                == SIMD_CLONE_ARG_TYPE_UNIFORM
             && (node->simdclone->args[i].alignment
                 & (node->simdclone->args[i].alignment - 1)) == 0
             && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
                == POINTER_TYPE)
      {
        /* aligned() clause on a uniform pointer with power-of-two
           alignment: funnel all uses through __builtin_assume_aligned.  */
        unsigned int alignment = node->simdclone->args[i].alignment;
        tree orig_arg = node->simdclone->args[i].orig_arg;
        tree def = ssa_default_def (cfun, orig_arg);
        if (def && !has_zero_uses (def))
          {
            tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
            gimple_seq seq = NULL;
            bool need_cvt = false;
            gcall *call
              = gimple_build_call (fn, 2, def, size_int (alignment));
            g = call;
            if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
                                            ptr_type_node))
              need_cvt = true;
            tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg);
            gimple_call_set_lhs (g, t);
            gimple_seq_add_stmt_without_update (&seq, g);
            if (need_cvt)
              {
                t = make_ssa_name (orig_arg);
                g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g));
                gimple_seq_add_stmt_without_update (&seq, g);
              }
            gsi_insert_seq_on_edge_immediate
              (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
            /* Inserting on the edge may have split the entry block.  */
            entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
            node->create_edge (cgraph_node::get_create (fn),
                               call, entry_bb->count);
            /* Replace all non-debug uses of the original default def
               with the assume_aligned result.  */
            imm_use_iterator iter;
            use_operand_p use_p;
            gimple *use_stmt;
            tree repl = gimple_get_lhs (g);
            FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
              if (is_gimple_debug (use_stmt) || use_stmt == call)
                continue;
              else
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, repl);
          }
      }
    else if ((node->simdclone->args[i].arg_type
              == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
             || (node->simdclone->args[i].arg_type
                 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)
             || (node->simdclone->args[i].arg_type
                 == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
             || (node->simdclone->args[i].arg_type
                 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP))
      {
        /* linear clause: turn the argument into an induction variable
           advanced by the (constant or variable) step each iteration.  */
        tree orig_arg = node->simdclone->args[i].orig_arg;
        gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
                    || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
        tree def = NULL_TREE;
        if (TREE_ADDRESSABLE (orig_arg))
          {
            def = make_ssa_name (TREE_TYPE (orig_arg));
            iter1 = make_ssa_name (TREE_TYPE (orig_arg));
            if (incr_bb)
              iter2 = make_ssa_name (TREE_TYPE (orig_arg));
            gsi = gsi_after_labels (entry_bb);
            g = gimple_build_assign (def, orig_arg);
            gsi_insert_before (&gsi, g, GSI_NEW_STMT);
          }
        else
          {
            def = ssa_default_def (cfun, orig_arg);
            if (!def || has_zero_uses (def))
              def = NULL_TREE;
            else
              {
                iter1 = make_ssa_name (orig_arg);
                if (incr_bb)
                  iter2 = make_ssa_name (orig_arg);
              }
          }
        if (def)
          {
            /* iter1 = PHI <def (preheader), iter2 (latch)>  */
            phi = create_phi_node (iter1, body_bb);
            add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
            if (incr_bb)
              {
                add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
                enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
                                      ? PLUS_EXPR : POINTER_PLUS_EXPR;
                tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
                               ? TREE_TYPE (orig_arg) : sizetype;
                tree addcst = simd_clone_linear_addend (node, i, addtype,
                                                        entry_bb);
                gsi = gsi_last_bb (incr_bb);
                g = gimple_build_assign (iter2, code, iter1, addcst);
                gsi_insert_before (&gsi, g, GSI_SAME_STMT);
              }
            imm_use_iterator iter;
            use_operand_p use_p;
            gimple *use_stmt;
            if (TREE_ADDRESSABLE (orig_arg))
              {
                /* For addressable args the body reads the variable, so
                   store the current induction value back each iteration.  */
                gsi = gsi_after_labels (body_bb);
                g = gimple_build_assign (orig_arg, iter1);
                gsi_insert_before (&gsi, g, GSI_NEW_STMT);
              }
            else
              FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
                if (use_stmt == phi)
                  continue;
                else
                  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                    SET_USE (use_p, iter1);
          }
      }
    else if (node->simdclone->args[i].arg_type
             == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
             || (node->simdclone->args[i].arg_type
                 == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP))
      {
        /* linear uval: reference arg whose pointed-to value is linear.
           Keep two IVs: iter1/iter2 walk the per-lane simd_array slots,
           iter4/iter5 track the linear value itself; the body sees the
           array slot through the redirected uses.  */
        tree orig_arg = node->simdclone->args[i].orig_arg;
        tree def = ssa_default_def (cfun, orig_arg);
        gcc_assert (!TREE_ADDRESSABLE (orig_arg)
                    && TREE_CODE (TREE_TYPE (orig_arg)) == REFERENCE_TYPE);
        if (def && !has_zero_uses (def))
          {
            tree rtype = TREE_TYPE (TREE_TYPE (orig_arg));
            iter1 = make_ssa_name (orig_arg);
            if (incr_bb)
              iter2 = make_ssa_name (orig_arg);
            tree iter3 = make_ssa_name (rtype);
            tree iter4 = make_ssa_name (rtype);
            tree iter5 = incr_bb ? make_ssa_name (rtype) : NULL_TREE;
            gsi = gsi_after_labels (entry_bb);
            /* iter3 = initial value loaded through the reference.  */
            gimple *load
              = gimple_build_assign (iter3, build_simple_mem_ref (def));
            gsi_insert_before (&gsi, load, GSI_NEW_STMT);
            tree array = node->simdclone->args[i].simd_array;
            TREE_ADDRESSABLE (array) = 1;
            tree ptr = build_fold_addr_expr (array);
            phi = create_phi_node (iter1, body_bb);
            add_phi_arg (phi, ptr, preheader_edge, UNKNOWN_LOCATION);
            if (incr_bb)
              {
                add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
                g = gimple_build_assign (iter2, POINTER_PLUS_EXPR, iter1,
                                         TYPE_SIZE_UNIT (TREE_TYPE (iter3)));
                gsi = gsi_last_bb (incr_bb);
                gsi_insert_before (&gsi, g, GSI_SAME_STMT);
              }
            phi = create_phi_node (iter4, body_bb);
            add_phi_arg (phi, iter3, preheader_edge, UNKNOWN_LOCATION);
            if (incr_bb)
              {
                add_phi_arg (phi, iter5, latch_edge, UNKNOWN_LOCATION);
                enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
                                      ? PLUS_EXPR : POINTER_PLUS_EXPR;
                tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
                               ? TREE_TYPE (iter3) : sizetype;
                tree addcst = simd_clone_linear_addend (node, i, addtype,
                                                        entry_bb);
                g = gimple_build_assign (iter5, code, iter4, addcst);
                gsi = gsi_last_bb (incr_bb);
                gsi_insert_before (&gsi, g, GSI_SAME_STMT);
              }
            /* Publish the current linear value into this lane's slot.  */
            g = gimple_build_assign (build_simple_mem_ref (iter1), iter4);
            gsi = gsi_after_labels (body_bb);
            gsi_insert_before (&gsi, g, GSI_SAME_STMT);
            imm_use_iterator iter;
            use_operand_p use_p;
            gimple *use_stmt;
            FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
              if (use_stmt == load)
                continue;
              else
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, iter1);
            if (!TYPE_READONLY (rtype) && incr_bb)
              {
                /* Writable uval: copy lane 0's final value back through
                   the caller's reference at loop exit.  */
                tree v = make_ssa_name (rtype);
                tree aref = build4 (ARRAY_REF, rtype, array,
                                    size_zero_node, NULL_TREE,
                                    NULL_TREE);
                gsi = gsi_after_labels (new_exit_bb);
                g = gimple_build_assign (v, aref);
                gsi_insert_before (&gsi, g, GSI_SAME_STMT);
                g = gimple_build_assign (build_simple_mem_ref (def), v);
                gsi_insert_before (&gsi, g, GSI_SAME_STMT);
              }
          }
      }
  calculate_dominance_info (CDI_DOMINATORS);
  if (loop)
    add_loop (loop, loop->header->loop_father);
  update_ssa (TODO_update_ssa);
  pop_cfun ();
}
/* If the function in NODE is tagged as an elemental SIMD function,
   create the appropriate SIMD clones.  One clone (or an inbranch and a
   notinbranch pair) is created per "omp declare simd" attribute and per
   target ISA variant; created clones are chained on node->simd_clones. */
void
expand_simd_clones (struct cgraph_node *node)
{
  tree attr = lookup_attribute ("omp declare simd",
                                DECL_ATTRIBUTES (node->decl));
  /* Nothing to do for untagged, inlined-away, or noclone functions.  */
  if (attr == NULL_TREE
      || node->global.inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;
  /* Ignore
     #pragma omp declare simd
     extern int foo ();
     in C, there we don't know the argument types at all. */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;
  /* Call this before creating clone_info, as it might ggc_collect. */
  if (node->definition && node->has_gimple_body_p ())
    node->get_body ();
  /* One pass per "omp declare simd" attribute on the decl.  */
  do
    {
      /* Start with parsing the "omp declare simd" attribute(s). */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
        = simd_clone_clauses_extract (node, TREE_VALUE (attr),
                                      &inbranch_clause_specified);
      if (clone_info == NULL)
        continue;
      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
         1 (just one ISA of simd clones should be created) or higher
         count of ISA variants.  In that case, clone_info is initialized
         for the first ISA variant. */
      int count
        = targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
                                                          base_type, 0);
      if (count == 0)
        continue;
      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
         also create one inbranch and one !inbranch clone of it.
         Even i = notinbranch, odd i = inbranch, ISA index = i / 2.  */
      for (int i = 0; i < count * 2; i++)
        {
          struct cgraph_simd_clone *clone = clone_info;
          if (inbranch_clause_specified && (i & 1) != 0)
            continue;
          if (i != 0)
            {
              /* Inbranch clones get one extra (mask) argument slot.  */
              clone = simd_clone_struct_alloc (clone_info->nargs
                                               + ((i & 1) != 0));
              simd_clone_struct_copy (clone, clone_info);
              /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
                 and simd_clone_adjust_argument_types did to the first
                 clone's info. */
              clone->nargs -= clone_info->inbranch;
              clone->simdlen = orig_simdlen;
              /* And call the target hook again to get the right ISA. */
              targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
                                                              base_type,
                                                              i / 2);
              if ((i & 1) != 0)
                clone->inbranch = 1;
            }
          /* simd_clone_mangle might fail if such a clone has been created
             already. */
          tree id = simd_clone_mangle (node, clone);
          if (id == NULL_TREE)
            continue;
          /* Only when we are sure we want to create the clone actually
             clone the function (or definitions) or create another
             extern FUNCTION_DECL (for prototypes without definitions). */
          struct cgraph_node *n = simd_clone_create (node);
          if (n == NULL)
            continue;
          n->simdclone = clone;
          clone->origin = node;
          clone->next_clone = NULL;
          /* Append N to the circular-ish clone list: prev_clone of the
             list head points at the tail.  */
          if (node->simd_clones == NULL)
            {
              clone->prev_clone = n;
              node->simd_clones = n;
            }
          else
            {
              clone->prev_clone = node->simd_clones->simdclone->prev_clone;
              clone->prev_clone->simdclone->next_clone = n;
              node->simd_clones->simdclone->prev_clone = n;
            }
          symtab->change_decl_assembler_name (n->decl, id);
          /* And finally adjust the return type, parameters and for
             definitions also function body. */
          if (node->definition)
            simd_clone_adjust (n);
          else
            {
              simd_clone_adjust_return_type (n);
              simd_clone_adjust_argument_types (n);
            }
        }
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}
/* Entry point for IPA simd clone creation pass.  Walks the whole
   callgraph; expand_simd_clones itself filters out functions without
   an "omp declare simd" attribute.  Always returns 0 (no TODO flags). */
static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}
namespace {

/* Pass descriptor: a simple IPA pass requiring SSA and CFG, with no
   timing variable of its own and no TODOs. */
const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS, /* type */
  "simdclone", /* name */
  OPTGROUP_OMP, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The pass object itself; execute just forwards to ipa_omp_simd_clone.  */
class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone(gcc::context *ctxt)
    : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
  {}
  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};

/* Run only on targets that implement the simd_clone hooks.  */
bool
pass_omp_simd_clone::gate (function *)
{
  return targetm.simd_clone.compute_vecsize_and_simdlen != NULL;
}

} // anon namespace

/* Factory used by the pass manager.  */
simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}
|
stereo_costs.h | #ifndef RECONSTRUCTION_BASE_STEREO_COSTS_
#define RECONSTRUCTION_BASE_STEREO_COSTS_
#include <iostream>
#include <unordered_map>
#include <opencv2/core/core.hpp>
#include <Eigen/Core>
#include "../../core/types.h"
namespace recon
{
typedef Eigen::Matrix<uint32_t, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MatrixCensusCosts;
namespace StereoCosts
{
void calcPatchMeans(const cv::Mat& img, cv::Mat& means, int wsz);
template<typename T>
uint8_t hamming_dist(T x, T y);
uint32_t get_cost_SAD(const cv::Mat& left_img, const cv::Mat& right_img, int wsz, int cx, int cy, int d);
float get_cost_ZSAD(const cv::Mat& left_img, const cv::Mat& right_img,
const cv::Mat& left_means, const cv::Mat& right_means,
int wsz, int cx, int cy, int d);
double get_cost_NCC(const core::DescriptorNCC& d1, const core::DescriptorNCC& d2);
void census_transform(const cv::Mat& img, int wsz, cv::Mat& census);
uint32_t census_transform_point(const core::Point& pt, const cv::Mat& img, int wsz);
template<typename T>
void compute_ncc_descriptor(const cv::Mat& img, const core::Point& feat, const int window_sz,
const int cv_type, core::DescriptorNCC& desc);
template<typename T>
void compute_ncc_descriptor(const cv::Mat& img, const int cx, const int cy, const int window_sz,
const int cv_type, core::DescriptorNCC& desc);
void compute_image_ncc_descriptors(const cv::Mat& img, int window_sz,
std::vector<core::DescriptorNCC>& desciptors);
} // end namespace: StereoCosts
template<typename T>
inline
void StereoCosts::compute_ncc_descriptor(const cv::Mat& img, const int cx, const int cy, const int window_sz,
                                         const int cv_type, core::DescriptorNCC& desc)
{
  // Build an NCC descriptor for the window_sz x window_sz patch centered at
  // (cx,cy): the raw pixel vector plus precomputed sums so that a normalized
  // cross-correlation between two descriptors reduces to one dot product.
  //   A = sum of pixel values, B = sum of squared pixel values,
  //   C = 1 / sqrt(N*B - A*A) with N = window area, or -1 for flat patches.
  int desc_sz = window_sz * window_sz;
  int margin_sz = (window_sz - 1) / 2;
  //desc.vec.create(desc_sz, 1, CV_8U);
  //desc.vec.create(desc_sz, 1, CV_32F);
  desc.vec.create(desc_sz, 1, cv_type);
  desc.A = 0.0;
  desc.B = 0.0;
  desc.C = 0.0;
  // The whole window must lie inside the image.
  assert(cx >= margin_sz && cx < (img.cols - margin_sz));
  assert(cy >= margin_sz && cy < (img.rows - margin_sz));
  int vpos = 0;
  // Copy the patch row-major into desc.vec while accumulating A and B.
  for(int y = cy - margin_sz; y <= cy + margin_sz; y++) {
    for(int x = cx - margin_sz; x <= cx + margin_sz; x++) {
      T val = img.at<T>(y,x);
      desc.vec.at<T>(vpos) = val;
      double dval = static_cast<double>(val);
      desc.A += dval;
      desc.B += dval*dval;
      vpos++;
    }
  }
  // var = sqrt(N*B - A*A); the quantity under the root is N^2 * variance,
  // so var is N times the patch standard deviation.
  double var = std::sqrt((desc_sz * desc.B) - (desc.A * desc.A));
  // C = 1 / (N * stddev), the normalization factor used by get_cost_NCC.
  if (var > 0.0)
    desc.C = 1.0 / var;
  // if variance equals 0 (textureless patch) set negative C as a sentinel
  else
    desc.C = -1.0;
  assert(!std::isnan(desc.C));
  assert(!std::isinf(desc.C));
}
template<typename T>
inline
void StereoCosts::compute_ncc_descriptor(const cv::Mat& img, const core::Point& feat, const int window_sz,
                                         const int cv_type, core::DescriptorNCC& desc)
{
  // Convenience overload: truncate the (sub-pixel) feature coordinates to
  // integers and delegate to the (cx, cy) variant.
  compute_ncc_descriptor<T>(img,
                            static_cast<int>(feat.x_),
                            static_cast<int>(feat.y_),
                            window_sz, cv_type, desc);
}
inline
void StereoCosts::compute_image_ncc_descriptors(const cv::Mat& img, int window_sz,
                                                std::vector<core::DescriptorNCC>& desciptors)
{
  // Compute one NCC descriptor for every pixel whose window fits inside the
  // image; results are stored row-major over the interior grid of size
  // (cols - wsz + 1) x (rows - wsz + 1).
  int margin_sz = (window_sz-1) / 2;
  int width = img.cols - 2*margin_sz;
  int height = img.rows - 2*margin_sz;
  int num_of_desc = width * height;
  desciptors.resize(num_of_desc);
  // Each iteration writes a distinct vector element, so the rows can be
  // processed in parallel without synchronization.
  #pragma omp parallel for
  for(int y = 0; y < height; y++) {
    for(int x = 0; x < width; x++) {
      core::Point pt;
      // Shift interior-grid coordinates back into full-image coordinates.
      pt.x_ = x + margin_sz;
      pt.y_ = y + margin_sz;
      compute_ncc_descriptor<uint8_t>(img, pt, window_sz, CV_8U, desciptors[y*width + x]);
    }
  }
}
template<typename T>
inline
uint8_t StereoCosts::hamming_dist(T x, T y)
{
  // Hamming distance: number of bit positions where x and y differ.
  T diff = x ^ y;
  uint8_t count = 0;
  // Kernighan's trick: each step clears the lowest set bit, so the loop
  // runs once per differing bit.
  for (; diff; ++count)
    diff &= diff - 1;
  return count;
}
inline
uint32_t StereoCosts::get_cost_SAD(const cv::Mat& left_img, const cv::Mat& right_img, int wsz, int cx, int cy, int d)
{
  // Sum of absolute differences between the wsz x wsz patch centered at
  // (cx,cy) in the left image and the patch shifted left by disparity d
  // in the right image.
  const int half = (wsz - 1) / 2;
  int sum_abs_diff = 0;
  for (int row = cy - half; row <= cy + half; row++) {
    for (int col = cx - half; col <= cx + half; col++) {
      // uint8_t operands promote to int, so the subtraction cannot wrap.
      const int diff = static_cast<int>(left_img.at<uint8_t>(row, col)
                                        - right_img.at<uint8_t>(row, col - d));
      sum_abs_diff += std::abs(diff);
    }
  }
  return sum_abs_diff;
}
inline
float StereoCosts::get_cost_ZSAD(const cv::Mat& left_img, const cv::Mat& right_img,
                                 const cv::Mat& left_means, const cv::Mat& right_means,
                                 int wsz, int cx, int cy, int d)
{
  // Zero-mean SAD: like SAD but each pixel difference is corrected by the
  // difference of the two patch means, making the cost robust to a constant
  // brightness offset between the images.
  // The means matrices cover only window centers, so they are smaller than
  // the images by wsz-1 in each dimension.
  assert(left_img.rows == (left_means.rows + (wsz-1)));
  int ssz = (wsz-1) / 2;
  float zsad = 0.0f;
  // Map the full-image center (cx,cy) into means-matrix coordinates.
  int cx2 = cx - ssz;
  int cy2 = cy - ssz;
  float mean_diff = left_means.at<float>(cy2,cx2) - right_means.at<float>(cy2,cx2-d);
  for(int y = (cy - ssz); y <= (cy + ssz); y++) {
    for(int x = (cx - ssz); x <= (cx + ssz); x++) {
      float idiff = left_img.at<uint8_t>(y,x) - right_img.at<uint8_t>(y,x-d);
      zsad += std::abs(idiff - mean_diff);
    }
  }
  //std::cout << "ZSAD = " << zsad << "\n";
  return zsad;
}
inline
double StereoCosts::get_cost_NCC(const core::DescriptorNCC& d1, const core::DescriptorNCC& d2)
{
  // Normalized cross-correlation of two patch descriptors:
  //   NCC = (N*dot(v1,v2) - A1*A2) * C1 * C2
  // using the precomputed sums from compute_ncc_descriptor (A = sum,
  // C = 1/sqrt(N*B - A*A)).  Result is in [-1, 1]; higher means more similar.
  assert(d1.vec.rows == d2.vec.rows);
  // A negative C marks a textureless (zero-variance) patch; NCC is
  // undefined there, so return the worst possible score.
  if (d1.C < 0.0 || d2.C < 0.0)
    return -1.0;
  double n = d1.vec.rows;
  double D = d1.vec.dot(d2.vec);
  double ncc = (n * D - (d1.A * d2.A)) * d1.C * d2.C;
  //if(std::isnan(ncc) || std::isinf(ncc)) {
  //  printf("TRUE! NCC = %f\n", ncc);
  //  printf("D = %f\n", D);
  //  ncc = 0.0;
  //  throw 1;
  //}
  return ncc;
}
}
#endif
|
GB_unaryop__minv_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_uint16
// op(A') function: GB_tran__minv_uint16_uint16
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise Cx [p] = GB_IMINV_UNSIGNED (Ax [p], 16) for p = 0..anz-1,
// parallelized with a static OpenMP schedule over nthreads threads.
GrB_Info GB_unop__minv_uint16_uint16
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    // When the MINV operator or uint16 type is disabled at compile time
    // (GB_DISABLE), report GrB_NO_VALUE so the generic worker is used.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Iterations are independent: each writes only Cx [p].
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // Same compile-time opt-out as the non-transposing variant above.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The shared transpose template does the actual work; the GB_* macros
    // defined earlier in this file specialize it for uint16_t and the
    // GB_IMINV_UNSIGNED operator.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
linAlgWeightedNorm2.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
extern "C"
void weightedNorm2(const dlong & Nblocks, const dlong & N,
const dfloat * __restrict__ cpu_w,
const dfloat * __restrict__ cpu_a,
dfloat * __restrict__ cpu_wa){
dfloat wa2 = 0;
#pragma omp parallel for reduction(+:wa2)
for(int i=0;i<N;++i){
const dfloat ai = cpu_a[i];
const dfloat wi = cpu_w[i];
wa2 += ai*ai*wi;
}
cpu_wa[0] = wa2;
}
extern "C"
void weightedNorm2Many(const dlong & Nblocks, const dlong & N,
const dlong & Nfields,
const dlong & offset,
const dfloat * __restrict__ cpu_w,
const dfloat * __restrict__ cpu_a,
dfloat * __restrict__ cpu_wa){
dfloat wa2 = 0;
#pragma omp parallel for collapse(2) reduction(+:wa2)
for(int fld=0;fld<Nfields;fld++) {
for(int i=0;i<N;++i){
const dlong id = i + fld*offset;
const dfloat ai = cpu_a[id];
const dfloat wi = cpu_w[i];
wa2 += ai*ai*wi;
}
}
cpu_wa[0] = wa2;
} |
fields_modifiers.c | // RUN: %libomp-compile-and-run
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#define XSTR(x) #x
#define STR(x) XSTR(x)
#define streqls(s1, s2) (!strcmp(s1, s2))
#define check(condition) \
if (!(condition)) { \
fprintf(stderr, "error: %s: %d: " STR(condition) "\n", __FILE__, \
__LINE__); \
exit(1); \
}
#define BUFFER_SIZE 1024
char buf[BUFFER_SIZE];
#pragma omp threadprivate(buf)
/* Capture the current thread's affinity string (per the format set with
   omp_set_affinity_format) into this thread's threadprivate buffer and
   return it.  If check_needed is nonzero, also verify that
   omp_capture_affinity reported exactly that many characters. */
char* get_string(size_t check_needed) {
  size_t needed = omp_capture_affinity(buf, BUFFER_SIZE, NULL);
  //printf("buf = %s\n", buf);
  /* needed excludes the terminating NUL; it must fit in the buffer. */
  check(needed < BUFFER_SIZE);
  if (check_needed != 0) {
    check(needed == check_needed);
  }
  return buf;
}
/* "%0.8{thread_num}" (long form) and "%0.8n" (short form): right-justified
   in an 8-character field, zero-padded.  With 8 threads the thread number
   is a single digit, so the expected output is seven '0's then the digit. */
void check_thread_num_padded_rjustified() {
  int i;
  const char* formats[2] = {"%0.8{thread_num}", "%0.8n"};
  for (i = 0; i < sizeof(formats)/sizeof(formats[0]); ++i) {
    omp_set_affinity_format(formats[i]);
    #pragma omp parallel num_threads(8)
    {
      int j;
      int tid = omp_get_thread_num();
      char ctid = '0' + (char)tid; /* tids 0..7 are single digits */
      char* s = get_string(8);
      /* seven leading '0' pad characters... */
      for (j = 0; j < 7; ++j) {
        check(s[j] == '0');
      }
      /* ...then the thread number itself. */
      check(s[j] == ctid);
    }
  }
}
/* "%.12{thread_num}" / "%.12n": right-justified in a 12-character field,
   padded with spaces (no '0' flag).  Expect 11 spaces then the digit. */
void check_thread_num_rjustified() {
  int i;
  const char* formats[2] = {"%.12{thread_num}", "%.12n"};
  for (i = 0; i < sizeof(formats)/sizeof(formats[0]); ++i) {
    omp_set_affinity_format(formats[i]);
    #pragma omp parallel num_threads(8)
    {
      int j;
      int tid = omp_get_thread_num();
      char ctid = '0' + (char)tid; /* tids 0..7 are single digits */
      char* s = get_string(12);
      /* eleven leading space pad characters... */
      for (j = 0; j < 11; ++j) {
        check(s[j] == ' ');
      }
      /* ...then the thread number itself. */
      check(s[j] == ctid);
    }
  }
}
/* "%5{thread_num}" / "%5n": a bare field size means left-justified.
   Expect the digit first, followed by 4 trailing spaces. */
void check_thread_num_ljustified() {
  int i;
  const char* formats[2] = {"%5{thread_num}", "%5n"};
  for (i = 0; i < sizeof(formats)/sizeof(formats[0]); ++i) {
    omp_set_affinity_format(formats[i]);
    #pragma omp parallel num_threads(8)
    {
      int j;
      int tid = omp_get_thread_num();
      char ctid = '0' + (char)tid; /* tids 0..7 are single digits */
      char* s = get_string(5);
      /* thread number first... */
      check(s[0] == ctid);
      /* ...then space padding out to the field size. */
      for (j = 1; j < 5; ++j) {
        check(s[j] == ' ');
      }
    }
  }
}
/* "%018{thread_num}" / "%018n": size 18, left-justified.  The test expects
   the digit followed by 17 spaces, i.e. the '0' prefix on the size does not
   produce zero padding for a left-justified field (zero padding applies to
   the '.'-prefixed right-justified form only -- see the rjustified tests). */
void check_thread_num_padded_ljustified() {
  int i;
  const char* formats[2] = {"%018{thread_num}", "%018n"};
  for (i = 0; i < sizeof(formats)/sizeof(formats[0]); ++i) {
    omp_set_affinity_format(formats[i]);
    #pragma omp parallel num_threads(8)
    {
      int j;
      int tid = omp_get_thread_num();
      char ctid = '0' + (char)tid; /* tids 0..7 are single digits */
      char* s = get_string(18);
      /* thread number first... */
      check(s[0] == ctid);
      /* ...then space padding out to the field size. */
      for (j = 1; j < 18; ++j) {
        check(s[j] == ' ');
      }
    }
  }
}
/* Run every affinity-format field-modifier check; a failed check() prints
   a diagnostic and exits with status 1, otherwise the test passes. */
int main(int argc, char** argv) {
  check_thread_num_ljustified();
  check_thread_num_rjustified();
  check_thread_num_padded_ljustified();
  check_thread_num_padded_rjustified();
  return 0;
}
|
nqueens-1.c | /* { dg-do run } */
/* { dg-options "-O2 -fopenmp" } */
/* { dg-require-effective-target tls_runtime } */
#include <omp.h>
#include <stdio.h>
#include <string.h>
int cnt;
#pragma omp threadprivate (cnt)
/* Count completions of the partial N-queens placement A (rows 0..POS-1
   already placed) on an N x N board, accumulating solutions into the
   threadprivate counter cnt.  Deeper rows are explored as OpenMP tasks
   when inside a parallel region; outside one, the tasks run inline and
   the search is serial. */
void
nqueens (char *a, int n, int pos)
{
  /* b[i] = j means the queen in i-th row is in column j.  */
  char b[pos + 1];
  int i, j;
  memcpy (b, a, pos);
  for (i = 0; i < n; i++)
    {
      /* Conflict test against each placed queen in row j: same column,
         or on a diagonal (|b[j] - i| == pos - j, written without abs).  */
      for (j = 0; j < pos; j++)
	if (b[j] == i || b[j] == i + pos - j || i == b[j] + pos - j)
	  break;
      if (j < pos)
	continue;
      if (pos == n - 1)
	/* Found a solution.  Could output it here.  */
	++cnt;
      else
	{
	  b[pos] = i;
	  /* b (a VLA) and the scalars are implicitly firstprivate in the
	     task, so each task works on its own copy of the board.  */
#pragma omp task
	    nqueens (b, n, pos + 1);
	}
    }
}
/* Run the N-queens count twice -- serially, then with an OpenMP task
   team -- and print both solution counts with wall-clock timings.  The
   board size defaults to 8 and may be overridden by argv[1]. */
int
main (int argc, char **argv)
{
  int n = 8;
  int total = 0;
  double stime;

  if (argc >= 2)
    n = strtoul (argv[1], NULL, 0);
  if (n < 1 || n > 127)
    {
      fprintf (stderr, "invalid count %d\n", n);
      return 1;
    }

  /* Serial reference run: outside a parallel region the tasks execute
     immediately on the calling thread. */
  cnt = 0;
  stime = omp_get_wtime ();
  nqueens ("", n, 0);
  printf ("serial N %d solutions # %d time %f\n", n, cnt, omp_get_wtime () - stime);

  /* cnt is threadprivate: zero every thread's copy before the parallel run. */
#pragma omp parallel
  cnt = 0;

  stime = omp_get_wtime ();
#pragma omp parallel reduction (+:total)
  {
    /* One thread seeds the search; spawned tasks fan out across the team. */
#pragma omp single
    nqueens ("", n, 0);
    total = cnt;
  }
  cnt = total;
  printf ("parallel N %d solutions # %d time %f\n", n, cnt, omp_get_wtime () - stime);
  return 0;
}
|
Match_fs.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"
/*
* MATCH_fs package
*
* like Match package but uses "fixed strings" and not regular expressions
*
* 11/2014 in public domain Wesley Ebisuzaki
*
*/
extern int match_fs;     /* set to 1 once any -match_fs family option is registered */
extern int match_flag;   /* conditional-execution flag published by -if_fs/-not_if_fs */
int match_count_fs;      /* number of registered match/not/if fixed-string tests */
int fgrep, fgrep_flag, fgrep_count;   /* same roles for the -fgrep family */
static const char *match_fs_store[MATCH_MAX];  /* fixed strings to search for */
static int match_fs_type[MATCH_MAX];           /* 1 = -match_fs, 0 = -not_fs, 2 = -if_fs/-not_if_fs */
static int match_fs_val[MATCH_MAX];            /* per-record result of type-2 tests (strstr()==NULL) */
static const char *fgrep_store[MATCH_MAX];     /* -fgrep/-fgrep_v fixed strings */
static int fgrep_type[MATCH_MAX];              /* 1 = -fgrep, 0 = -fgrep_v */
/* Decide whether record description S should be skipped.  Returns 1 when a
 * -match_fs string is absent or a -not_fs string is present; otherwise
 * evaluates every -if_fs/-not_if_fs test into match_fs_val[] and returns 0. */
int is_match_fs(const char *s) {
    int i;

    /* filter tests (types 0 and 1): bail out on the first failed test */
    for (i = 0; i < match_count_fs; i++) {
        int missing;
        if (match_fs_type[i] == 2) continue;       /* if-tests handled below */
        missing = (strstr(s, match_fs_store[i]) == NULL);
        if (missing == match_fs_type[i]) return 1; /* 1 = reject this record */
    }

    /* if-tests (type 2): record each result for f_if_fs/f_not_if_fs */
#pragma omp parallel for private(i)
    for (i = 0; i < match_count_fs; i++) {
        if (match_fs_type[i] == 2)
            match_fs_val[i] = (strstr(s, match_fs_store[i]) == NULL);
    }
    return 0;
}
/*
* HEADER:100:match_fs:setup:1:process data that matches X (fixed string)
*/
/* Register ARG1 as a -match_fs (must-contain) fixed string.  Only the
 * initialization pass (mode == -1) does any work. */
int f_match_fs(ARG1) {
    if (mode != -1) return 0;
    if (match_count_fs >= MATCH_MAX)
        fatal_error("too many -match_fs, -not_fs options","");
    match_fs = 1;
    match_fs_store[match_count_fs] = arg1;
    match_fs_type[match_count_fs] = 1;   /* 1 = must match */
    match_count_fs++;
    return 0;
}
/*
* HEADER:100:not_fs:setup:1:process data that does not match X (fixed string)
*/
/* Register ARG1 as a -not_fs (must-not-contain) fixed string.  Only the
 * initialization pass (mode == -1) does any work. */
int f_not_fs(ARG1) {
    if (mode != -1) return 0;
    if (match_count_fs >= MATCH_MAX)
        fatal_error("too many -match_fs, -not_fs options","");
    match_fs = 1;
    match_fs_store[match_count_fs] = arg1;
    match_fs_type[match_count_fs] = 0;   /* 0 = must not match */
    match_count_fs++;
    return 0;
}
/*
* HEADER:100:if_fs:misc:1:if X (fixed string) matches, conditional execution up to next output/fi
*/
/* -if_fs: at setup, registers arg1 as an if-test (type 2) and remembers its
 * slot; at processing time publishes the result stored by is_match_fs() in
 * the global match_flag (0 when the string was found in the record). */
int f_if_fs(ARG1) {
    /* per-option state, kept across calls through *local */
    struct local_struct {
        int match_cnt;   /* slot of this option's pattern in the match_fs_* arrays */
    };
    struct local_struct *save;
    if (mode == -1) {
        /* initialization pass: register the pattern */
        if (match_count_fs >= MATCH_MAX) fatal_error("too many -match_fs, -not_fs -if_fs options","");
        match_fs = 1;
        match_fs_store[match_count_fs] = arg1;
        match_fs_type[match_count_fs] = 2;   /* 2 = if-test, not a filter */
        *local = save = (struct local_struct *) malloc( sizeof(struct local_struct));
        if (save == NULL) fatal_error("memory allocation if_fs","");
        save->match_cnt = match_count_fs;
        match_count_fs++;
    }
    else if (mode == -2) {
        /* finalization pass: release the per-option state */
        free(*local);
    }
    else if (mode >= 0) {
        /* processing pass: match_fs_val[] holds (strstr(record, pattern) == NULL) */
        save = (struct local_struct *) *local;
        match_flag = match_fs_val[save->match_cnt];
    }
    return 0;
}
/*
* HEADER:100:not_if_fs:misc:1:if X (fixed string) does not match, conditional execution up to next output/fi
*/
/* -not_if_fs: mirror of f_if_fs with the published flag inverted --
 * match_flag is 1 when the fixed string was found in the record. */
int f_not_if_fs(ARG1) {
    /* per-option state, kept across calls through *local */
    struct local_struct {
        int match_cnt;   /* slot of this option's pattern in the match_fs_* arrays */
    };
    struct local_struct *save;
    if (mode == -1) {
        /* initialization pass: register the pattern as an if-test */
        if (match_count_fs >= MATCH_MAX) fatal_error("too many -match_fs, -not_fs -not_if_fs options","");
        match_fs = 1;
        match_fs_store[match_count_fs] = arg1;
        match_fs_type[match_count_fs] = 2;   /* 2 = if-test, not a filter */
        *local = save = (struct local_struct *) malloc( sizeof(struct local_struct));
        if (save == NULL) fatal_error("memory allocation not_if_fs","");
        save->match_cnt = match_count_fs;
        match_count_fs++;
    }
    else if (mode == -2) {
        /* finalization pass: release the per-option state */
        free(*local);
    }
    else if (mode >= 0) {
        /* processing pass: invert the stored (strstr()==NULL) result */
        save = (struct local_struct *) *local;
        match_flag = (match_fs_val[save->match_cnt] == 0);
    }
    return 0;
}
/*
* HEADER:100:fgrep:setup:1:fgrep X | wgrib2
*/
int f_fgrep(ARG1) {
if (mode == -1) {
if (fgrep_count >= GREP_MAX) fatal_error("too many -grep options","");
fgrep = 1;
fgrep_store[fgrep_count] = arg1;
fgrep_type[fgrep_count] = 1;
fgrep_count++;
}
return 0;
}
/*
* HEADER:100:fgrep_v:setup:1:fgrep -v X | wgrib2
*/
int f_fgrep_v(ARG1) {
if (mode == -1) {
if (fgrep_count >= GREP_MAX) fatal_error("too many -grep options","");
fgrep = 1;
fgrep_store[fgrep_count] = arg1;
fgrep_type[fgrep_count] = 0;
fgrep_count++;
}
return 0;
}
/* Return 1 when record description S fails any registered -fgrep/-fgrep_v
 * test (i.e. the record should be skipped), 0 when it passes them all. */
int is_fgrep(const char *s) {
    int i;
    for (i = 0; i < fgrep_count; i++) {
        int found;
        if (fgrep_type[i] == 2) continue;   /* type 2 never set here; kept for symmetry with match_fs */
        found = (strstr(s, fgrep_store[i]) != NULL);
        if (found != fgrep_type[i]) return 1;
    }
    return 0;
}
|
constitute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE %
% C O O NN N SS T I T U U T E %
% C O O N N N ESSS T I T U U T EEE %
% C O O N NN SS T I T U U T E %
% CCCC OOO N N SSSSS T IIIII T UUU T EEEEE %
% %
% %
% MagickCore Methods to Consitute an Image %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n s t i t u t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteImage() returns an image from the pixel data you supply.
% The pixel data must be in scanline order top-to-bottom. The data can be
% char, short int, int, float, or double. Float and double require the
% pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
% image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
% Image *ConstituteImage(const size_t columns,const size_t rows,
% const char *map,const StorageType storage,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: width in pixels of the image.
%
% o rows: height in pixels of the image.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose
% from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Derive the image depth from the size of one storage element; types not
    listed here (e.g. IntegerPixel, QuantumPixel) keep the acquired default.
  */
  switch (storage)
  {
    case CharPixel: image->depth=8*sizeof(unsigned char); break;
    case DoublePixel: image->depth=8*sizeof(double); break;
    case FloatPixel: image->depth=8*sizeof(float); break;
    case LongPixel: image->depth=8*sizeof(unsigned long); break;
    case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
    case ShortPixel: image->depth=8*sizeof(unsigned short); break;
    default: break;
  }
  /*
    Scan the channel map to configure alpha trait and colorspace before
    the pixels are imported.
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        /* map contains an alpha/opacity channel */
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        /* a one-channel map of any other letter is treated as grayscale */
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /*
    Copy the caller's pixel array into the image; on failure NULL is
    returned with the reason recorded in EXCEPTION.
  */
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImage() returns all the properties of an image or image sequence
% except for the pixels. It is much faster and consumes far less memory
% than ReadImage(). On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the PingImage method is:
%
% Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Ping the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  Stream sink used by PingImage(): ignores the pixel data it is handed and
  reports all COLUMNS as consumed, so the read proceeds without the pixels
  being kept anywhere.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *ping_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Read through the discarding PingStream sink with the ping flag set, so
    properties are collected without retaining pixel data.
  */
  ping_info=CloneImageInfo(image_info);
  ping_info->ping=MagickTrue;
  image=ReadStream(ping_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      /* drop timing accumulated during the read */
      ResetTimer(&image->timer);
      if (ping_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  ping_info=DestroyImageInfo(ping_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImages() pings one or more images and returns them as an image list.
%
% The format of the PingImage method is:
%
% Image *PingImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  /* Expand any %d-style scene template embedded in the filename. */
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      read_info=CloneImageInfo(image_info);
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes == 0)
        {
          /* no [n-m] scene range given: ping as a single image */
          read_info=DestroyImageInfo(read_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(ping_filename,read_info->filename,
        MagickPathExtent);
      images=NewImageList();
      extent=(ssize_t) (read_info->scene+read_info->number_scenes);
      /* Ping each scene individually; scenes that fail are skipped. */
      for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
      {
        (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
          (int) scene,read_info->filename,exception);
        image=PingImage(read_info,exception);
        if (image == (Image *) NULL)
          continue;
        AppendImageToList(&images,image);
      }
      read_info=DestroyImageInfo(read_info);
      return(images);
    }
  return(PingImage(image_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImage() reads an image or image sequence from a file or file handle.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadImage method is:
%
% Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Read the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return MagickTrue when the security policy grants RIGHTS for CODER;
  otherwise set errno to EPERM, record a PolicyError in EXCEPTION, and
  return MagickFalse.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) != MagickFalse)
    return(MagickTrue);
  errno=EPERM;
  (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
    "NotAuthorized","`%s'",coder);
  return(MagickFalse);
}
MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  const char
    *value;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  GeometryInfo
    geometry_info;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  /* keep copies of the original names; SetImageInfo may rewrite them */
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  /* Probe the coder registry with a throw-away exception; only a policy
     violation is re-raised into the caller's exception. */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  if (sans_exception->severity == PolicyError)
    magick_info=GetMagickInfo(read_info->magick,exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* raw formats with no caller-specified endianness default to
               the host byte order, detected with this one-word probe */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /* The coder needs random access: open the blob, and if it is not
         seekable, spool it to a temporary file first. */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          /* remember to delete the spool file when we are done */
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /* No decoder for the detected format; if no delegate exists either,
         retry format detection on the original filename. */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      /* serialize non-thread-safe coders around the decode call */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      /* the delegate converts the input into a format we can decode,
         leaving its output file name in read_info->filename */
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      /* delete the spool/delegate temporary and restore the real filename */
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) &&
      (GetImageListLength(image) != 1))
    {
      /* honor a [n-m] scene specification by keeping only those frames */
      Image
        *clones;

      clones=CloneImages(image,read_info->scenes,exception);
      if (clones != (Image *) NULL)
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  /*
    Post-process every frame: propagate filenames, fold EXIF/TIFF metadata
    into image attributes, apply -extract, timestamps, delay/dispose options.
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property,
      timestamp[MagickPathExtent];

    const char
      *option;

    const StringInfo
      *profile;

    ssize_t
      option_type;

    static const char
      *source_date_epoch = (const char *) NULL;

    static MagickBooleanType
      epoch_initalized = MagickFalse;

    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if ((*magick_path == '\0') && (*next->magick == '\0'))
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    /* force the lazy metadata profiles to be parsed into properties */
    (void) GetImageProperty(next,"exif:*",exception);
    (void) GetImageProperty(next,"icc:*",exception);
    (void) GetImageProperty(next,"iptc:*",exception);
    (void) GetImageProperty(next,"xmp:*",exception);
    value=GetImageProperty(next,"exif:Orientation",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:Orientation",exception);
    if (value != (char *) NULL)
      {
        /* move the orientation into the image struct and drop the property */
        next->orientation=(OrientationType) StringToLong(value);
        (void) DeleteImageProperty(next,"tiff:Orientation");
        (void) DeleteImageProperty(next,"exif:Orientation");
      }
    value=GetImageProperty(next,"exif:XResolution",exception);
    if (value != (char *) NULL)
      {
        /* EXIF resolution is a rational "rho/sigma"; a comma means the
           fractional part was written with a ',' separator */
        geometry_info.rho=next->resolution.x;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.x=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:XResolution");
      }
    value=GetImageProperty(next,"exif:YResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.y;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.y=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:YResolution");
      }
    value=GetImageProperty(next,"exif:ResolutionUnit",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:ResolutionUnit",exception);
    if (value != (char *) NULL)
      {
        option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
          value);
        if (option_type >= 0)
          next->units=(ResolutionType) option_type;
        (void) DeleteImageProperty(next,"exif:ResolutionUnit");
        (void) DeleteImageProperty(next,"tiff:ResolutionUnit");
      }
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    option=GetImageOption(read_info,"caption");
    if (option != (const char *) NULL)
      {
        /* expand %-escapes in the user-supplied caption/comment/label */
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"comment");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"label");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        /* -extract: crop when an offset was given, else resize to fit */
        RectangleInfo
          geometry;

        SetGeometry(next,&geometry);
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;

                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;

                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /* NOTE(review): these profile lookups have no visible effect -- the
       icc/icm result is immediately overwritten by the iptc lookup and
       'profile' is never read afterwards; looks vestigial, confirm before
       removing. */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    if (epoch_initalized == MagickFalse)
      {
        /* cache the environment lookup across calls */
        source_date_epoch=getenv("SOURCE_DATE_EPOCH");
        epoch_initalized=MagickTrue;
      }
    if (source_date_epoch == (const char *) NULL)
      {
        /* record file timestamps only when SOURCE_DATE_EPOCH is unset
           (keeps output stable for reproducible builds) */
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
          MagickPathExtent,timestamp);
        (void) SetImageProperty(next,"date:modify",timestamp,exception);
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
          MagickPathExtent,timestamp);
        (void) SetImageProperty(next,"date:create",timestamp,exception);
      }
    option=GetImageOption(image_info,"delay");
    if (option != (const char *) NULL)
      {
        /* delay geometry: rho = delay, sigma = ticks per second; '>'/'<'
           make the setting conditional on the current delay */
        flags=ParseGeometry(option,&geometry_info);
        if ((flags & GreaterValue) != 0)
          {
            if (next->delay > (size_t) floor(geometry_info.rho+0.5))
              next->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          if ((flags & LessValue) != 0)
            {
              /* NOTE(review): unlike the '>' branch, this assigns
                 ticks_per_second rather than delay -- confirm intended. */
              if (next->delay < (size_t) floor(geometry_info.rho+0.5))
                next->ticks_per_second=CastDoubleToLong(floor(
                  geometry_info.sigma+0.5));
            }
          else
            next->delay=(size_t) floor(geometry_info.rho+0.5);
        if ((flags & SigmaValue) != 0)
          next->ticks_per_second=CastDoubleToLong(floor(
            geometry_info.sigma+0.5));
      }
    option=GetImageOption(image_info,"dispose");
    if (option != (const char *) NULL)
      {
        option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
          option);
        if (option_type >= 0)
          next->dispose=(DisposeType) option_type;
      }
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    image=next;
  }
  read_info=DestroyImageInfo(read_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"UnableToReadImageData");
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImage method is:
%
% Image *ReadImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  /* Expand any %d-style scene template embedded in the filename. */
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          /* Read each scene individually; scenes that fail are skipped. */
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  /* plain filename (or no scene range): read as a single sequence */
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d I n l i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadInlineImage() reads a Base64-encoded inline image or image sequence.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadInlineImage method is:
%
% Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o content: the image encoded in Base64.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  /* the payload begins after the first ','; no comma means no payload */
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  blob=Base64Decode(++p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  /* locate the media subtype (the text after '/' in e.g. "image/gif") */
  for (p=content; (*p != '/') && (*p != '\0'); p++) ;
  if (*p != '\0')
    {
      char
        *q;

      ssize_t
        i;

      /*
        Extract media type.
      */
      if (LocaleNCompare(++p,"x-",2) == 0)
        p+=2;
      /* build a synthetic filename "data.<subtype>" -- presumably so the
         coder can be selected from the extension; confirm against
         BlobToImage/SetImageInfo */
      (void) strcpy(read_info->filename,"data.");
      q=read_info->filename+5;
      for (i=0; (*p != ';') && (*p != '\0') && (i < (MagickPathExtent-6)); i++)
        *q++=(*p++);
      *q++='\0';
    }
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImage() writes an image or an image sequence to a file or file handle.
% If writing to a file is on disk, the name is defined by the filename member
% of the image structure.  WriteImage() returns MagickFalse if there is a
% memory shortage or if the image cannot be written. Check the exception
% member of image to determine the cause for any failure.
%
% The format of the WriteImage method is:
%
% MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /*
    Remember the caller's filename; several code paths below overwrite
    image->filename and restore it from this copy before returning.
  */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  if (sans_exception->severity == PolicyError)
    magick_info=GetMagickInfo(write_info->magick,exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /*
              Raw formats with no caller-specified endianness default to the
              host byte order, detected at run time.
            */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  /*
    A bi-modal delegate converts the original file directly (e.g. ps:cmyk);
    only usable for a single, untainted image whose source file still exists.
  */
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      /*
        Probe the destination: if the encoder needs a seekable stream but the
        destination is not seekable, write to a unique temporary file instead
        and copy it to the real destination afterwards (see `temporary` below).
      */
      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.  Coders without
        thread support are serialized via their semaphore.
      */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      /*
        No built-in encoder: try an external delegate program, then fall back
        through several heuristics to locate a usable encoder.
      */
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          if (sans_exception->severity == PolicyError)
            magick_info=GetMagickInfo(write_info->magick,exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              /*
                Fallback 1: the image's original format.
              */
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              /*
                Fallback 2: infer the format from the filename extension.
              */
              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              /*
                Fallback 3: the original format again; give up with a
                MissingDelegateError if that also has no encoder.
              */
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowWriterException(FileOpenError,"UnableToWriteFile");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImages() writes an image sequence into one or more files. While
% WriteImage() can write an image sequence, it is limited to writing
% the sequence into a single file using a format which supports multiple
% frames. WriteImages(), however, does not have this limitation, instead it
% generates multiple output files if necessary (or when requested). When
% ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
% to include a printf-style formatting string for the frame number (e.g.
% "image%02d.png").
%
% The format of the WriteImages method is:
%
% MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o images: the image list.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /*
    An explicit filename overrides every frame's filename; otherwise each
    frame keeps its own.
  */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
  p=images;
  /*
    If scene numbers are not strictly increasing anywhere in the list,
    renumber the whole sequence consecutively from the first frame's scene.
  */
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /*
      Suppress per-frame monitors for multi-frame writes; overall progress
      is reported via SetImageProgress() below instead.
    */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /*
      With adjoin, WriteImage() emits the whole sequence into one file, so a
      single call suffices.
    */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
|
nqueens-1.c | /* { dg-do run } */
/* { dg-options "-O2 -fopenmp" } */
/* { dg-require-effective-target tls_runtime } */
#include <omp.h>
#include <stdio.h>
#include <string.h>
int cnt;
#pragma omp threadprivate (cnt)
/* Count all ways to extend the partial placement A (rows 0..POS-1 already
   hold queens) to a full N-queens solution, accumulating into the
   threadprivate counter `cnt`.  Recursion on deeper rows is spawned as
   OpenMP tasks; each task works on its own (firstprivate) copy of the
   board, so no synchronization on the board is needed.  */
void
nqueens (char *a, int n, int pos)
{
  /* board[r] = c means the queen in row r sits in column c.  */
  char board[pos + 1];
  int col;

  memcpy (board, a, pos);
  for (col = 0; col < n; col++)
    {
      int row = 0;

      /* Scan the placed queens; stop at the first one attacking
	 (row POS, column COL) along a column or either diagonal.  */
      while (row < pos
	     && board[row] != col
	     && board[row] != col + pos - row
	     && col != board[row] + pos - row)
	row++;
      if (row != pos)
	continue;		/* conflict: try the next column.  */
      if (pos == n - 1)
	/* Last row placed: one more solution.  Could output it here.  */
	++cnt;
      else
	{
	  board[pos] = col;
#pragma omp task
	  nqueens (board, n, pos + 1);
	}
    }
}
/* Driver: run the N-queens count once serially and once in parallel and
   print both results with timings.  */
int
main (int argc, char **argv)
{
  int n = 8;
  if (argc >= 2)
    n = strtoul (argv[1], NULL, 0);
  /* Board columns are stored in a char, so n must fit in 0..127.  */
  if (n < 1 || n > 127)
    {
      fprintf (stderr, "invalid count %d\n", n);
      return 1;
    }
  cnt = 0;
  double stime = omp_get_wtime ();
  /* Serial run: no enclosing parallel region, so the tasks execute
     immediately on the initial thread.  */
  nqueens ("", n, 0);
  printf ("serial N %d solutions # %d time %f\n", n, cnt, omp_get_wtime () - stime);
  /* Zero the threadprivate counter in every thread of the pool.  */
#pragma omp parallel
  cnt = 0;
  stime = omp_get_wtime ();
  int tempcnt = 0;
#pragma omp parallel reduction (+:tempcnt)
  {
    /* One thread seeds the task tree; all threads help execute tasks.
       All tasks are complete at the implicit barrier of the single
       construct, so reading cnt afterwards is safe.  */
#pragma omp single
    nqueens ("", n, 0);
    tempcnt = cnt;
  }
  /* Sum of the per-thread threadprivate counters.  */
  cnt = tempcnt;
  printf ("parallel N %d solutions # %d time %f\n", n, cnt, omp_get_wtime () - stime);
  return 0;
}
|
nowait-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This example is extracted from a paper:
Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013
Some threads may finish the for loop early and execute error = a[9]+1
while another thread may still be simultaneously executing
the for worksharing region by writing to a[9], causing data races.
Data race pair: a[i]@72:7 vs. a[9]@75:13.
*/
#include <stdio.h>
/* DataRaceBench kernel: this program contains an INTENTIONAL data race
   (this is the "orig-yes" case) -- do not "fix" it.  The nowait clause
   removes the barrier after the worksharing loop, so the thread that
   executes the single region may read a[9] while another thread is still
   writing it inside the loop.  */
int main()
{
  int i,error;
  int len = 1000;
  int a[1000], b=5;
  for (i=0; i<len; i++)
    a[i]= i;
#pragma omp parallel shared(b, error)
  {
    /* nowait: threads proceed past the loop without synchronizing.  */
#pragma omp for nowait
    for(i = 0; i < len; i++)
      a[i] = b + a[i]*5;
    /* Races with the a[9] write above.  */
#pragma omp single
    error = a[9] + 1;
  }
  printf ("error = %d\n", error);
  return 0;
}
|
convolution_winograd_transform_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6,3x3) input transform, pack-4 bf16 storage (NEON).
// Converts each 8x8 input tile (6x6 output tile + 2 overlap) of the bf16
// bottom blob to fp32, applies B^T * d * B via the factored row/column
// passes below, and stores the 64 transformed coefficients per tile into
// bottom_blob_tm (fp32), grouped by coefficient index.
static void conv3x3s1_winograd63_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // 8x8 tiles with stride 6 (2 columns/rows of overlap for the 3x3 kernel)
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // per-tile scratch: [row-of-transform][source-row][pack-4 lane]
        float tmp[8][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 6) + (j * 6) * 4;

                // vertical pass: transform each of the 8 tile rows into tmp
                for (int m = 0; m < 8; m++)
                {
                    // bf16 -> fp32 loads of the 8 pack-4 elements of this row
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));
                    float32x4_t _r06 = vcvt_f32_bf16(vld1_u16(r0 + 24));
                    float32x4_t _r07 = vcvt_f32_bf16(vld1_u16(r0 + 28));

                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[7][m], _tmp7m);

                    // shared even/odd partial sums (see formula comments above)
                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);

                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);

                    r0 += w * 4;  // next image row (pack-4)
                }

                // destination pointers: one per transform row, tiles*4 apart
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;

                // horizontal pass: same transform applied across tmp columns,
                // results scattered into the coefficient-major layout
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);

                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);

                    // advance past the 8 coefficient planes of this group
                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}
// Winograd F(6x6,3x3) output transform, pack-4 bf16 storage (NEON).
// Reduces each tile's 8x8 fp32 transformed accumulators (top_blob_tm) to a
// 6x6 spatial output via A^T * m * A, adds the per-channel bias, and stores
// the result as bf16 into top_blob.
static void conv3x3s1_winograd63_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;  // may be an empty Mat -> NULL

    // const float otm[6][8] = {
    //     {1.0f,  1.0f,   1.0f,   1.0f,   1.0f,  32.0f, 32.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   2.0f,  -2.0f,  16.0f,-16.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,   4.0f,   4.0f,   8.0f,  8.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   8.0f,  -8.0f,   4.0f, -4.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,  16.0f,  16.0f,   2.0f,  2.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,  32.0f, -32.0f,   1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // per-tile scratch: [output row][source row][pack-4 lane]
        float tmp[6][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // 8 coefficient planes of this tile, tiles*4 floats apart
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;

                unsigned short* output0 = out0.row<unsigned short>(i * 6) + (j * 6) * 4;

                // vertical pass: 8 rows -> 6 rows into tmp
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);

                    // pairwise sums/differences shared by even/odd outputs
                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }

                // horizontal pass: 8 cols -> 6 cols, add bias, store as bf16
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
                    // fp32 -> bf16 stores
                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 16, vcvt_bf16_f32(_out04));

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));
                    vst1_u16(output0 + 20, vcvt_bf16_f32(_out05));

                    output0 += outw * 4;  // next output row (pack-4)
                }
            }
        }
    }
}
// Winograd F(4x4,3x3) input transform, pack-4 bf16 storage (NEON).
// Converts each 6x6 input tile (4x4 output tile + 2 overlap) of the bf16
// bottom blob to fp32, applies B^T * d * B via the factored row/column
// passes below, and stores the 36 transformed coefficients per tile into
// bottom_blob_tm (fp32), grouped by coefficient index.
static void conv3x3s1_winograd43_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // 6x6 tiles with stride 4 (2 columns/rows of overlap for the 3x3 kernel)
    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // per-tile scratch: [row-of-transform][source-row][pack-4 lane]
        float tmp[6][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 4) + (j * 4) * 4;

                // vertical pass: transform each of the 6 tile rows into tmp
                for (int m = 0; m < 6; m++)
                {
                    // bf16 -> fp32 loads of the 6 pack-4 elements of this row
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));

                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    r0 += w * 4;  // next image row (pack-4)
                }

                // destination pointers: one per transform row, tiles*4 apart
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;

                // horizontal pass: same transform applied across tmp columns,
                // results scattered into the coefficient-major layout
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);

                    // advance past the 6 coefficient planes of this group
                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) output transform for the pack4 layout with bf16
// storage: converts each 6x6 transform-domain tile in top_blob_tm back to a
// 4x4 spatial output patch, adds the per-channel bias, and stores the result
// as bf16 (via vcvt_bf16_f32) into top_blob.
static void conv3x3s1_winograd43_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // Each tile produces a 4x4 output patch; outw/outh are assumed to be
    // multiples of 4 here (padding is handled by the caller).
    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // Output-transform matrix (rows applied to the 6 transform rows/cols):
    // const float otm[4][6] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };
    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 = (r01 - r02) + (r03 - r04) * 2
    // 2 = (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // Per-channel bias vector (4 floats per pack4 channel group); zero
        // when no bias was supplied.
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // Scratch tile after the first (column) pass: 4 rows x 6 cols x 4 lanes.
        float tmp[4][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Six row pointers into the transform-domain channel; the six
                // rows of one tile are spaced tiles*4 floats apart.
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;

                // Destination: top-left corner of this tile's 4x4 patch (bf16).
                unsigned short* output0 = out0.row<unsigned short>(i * 4) + (j * 4) * 4;

                // Pass 1: apply otm down the columns (m walks the 6 columns).
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);

                    // Shared sub-expressions of the otm rows.
                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    // Advance to the next transform row (6 rows per tile set).
                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }

                // Pass 2: apply otm along the rows of tmp, add bias, store bf16.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    // Narrow fp32 -> bf16 and write one 4-pixel output row.
                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));

                    output0 += outw * 4;
                }
            }
        }
    }
}
|
DRB030-truedep1-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This program has data races due to true dependence within a loop.
Data race pair: a[i+1]@68:5 vs. a[i]@68:12
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
// NOTE: this is a DataRaceBench litmus test — the data race below is
// INTENTIONAL and must be preserved (race pair: write a[i+1] vs read a[i]).
int main(int argc, char* argv[])
{
    omprace_init();
    int i;
    int len=100;
    // Array length may be overridden by the first command-line argument.
    if (argc>1)
        len = atoi(argv[1]);
    int a[len];
    for (i=0;i<len;i++)
        a[i]=i;
    // Loop-carried true dependence: iteration i writes a[i+1], which
    // iteration i+1 reads. Parallelizing it therefore races across threads.
    #pragma omp parallel for
    for (i=0;i<len-1;i++)
        a[i+1]=a[i]+1;
    omprace_fini();
    return 0;
}
|
big.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
/*
 * Fills a large heap array with the id of the OpenMP thread that wrote each
 * element, then frees it. The original comment notes this was a reproducer
 * that failed at 512 * 4081 elements.
 *
 * Fixes vs. the original:
 *  - the parallel loop bound now uses `size` instead of repeating the
 *    literal 512 * 5000 (the two copies could silently drift apart);
 *  - the malloc() result is checked before use.
 */
int main()
{
    //Fails with 512 * 4081
    long size = 512 * 5000;
    int *data = (int*)malloc(size * sizeof(int));
    if (data == NULL)
    {
        fprintf(stderr, "allocation of %ld ints failed\n", size);
        return 1;
    }
    #pragma omp parallel for
    for (long i = 0; i < size; i++)
    {
        int rank = omp_get_thread_num();
        /* printf("rank: %d, before %ld\n", rank, i); */
        data[i] = rank;
        /* printf("rank: %d, after %ld\n", rank, i); */
    }
    free(data);
    return 0;
}
|
core_dlauum.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlauum.c, normal z -> d, Fri Sep 28 17:38:22 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_lauum
*
* Computes the product U * U^T or L^T * L, where the triangular
* factor U or L is stored in the upper or lower triangular part of
* the array A.
*
* If uplo = 'U' or 'u' then the upper triangle of the result is stored,
* overwriting the factor U in A.
* If uplo = 'L' or 'l' then the lower triangle of the result is stored,
* overwriting the factor L in A.
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the triangular factor U or L.
* On exit, if uplo = 'U', the upper triangle of A is
* overwritten with the upper triangle of the product U * U^T;
* if uplo = 'L', the lower triangle of A is overwritten with
* the lower triangle of the product L^T * L.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[out] info
* - 0 on successful exit
* - < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
// Computes U * U^T (uplo = upper) or L^T * L (uplo = lower) in place by
// delegating to LAPACKE's dlauum. Returns the LAPACKE info code: 0 on
// success, < 0 when an argument was illegal.
// NOTE(review): declared weak — presumably so tests/alternative backends can
// override this core kernel at link time; confirm against the build system.
__attribute__((weak))
int plasma_core_dlauum(plasma_enum_t uplo,
                       int n,
                       double *A, int lda)
{
    return LAPACKE_dlauum_work(LAPACK_COL_MAJOR,
                               lapack_const(uplo), n, A, lda);
}
/******************************************************************************/
// Asynchronous (OpenMP task) wrapper around plasma_core_dlauum. The task
// carries an inout dependence on the full lda*n block of A, so it is ordered
// against any other task that reads or writes A.
void plasma_core_omp_dlauum(plasma_enum_t uplo,
                            int n,
                            double *A, int lda,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n])
    {
        // Skip the work if an earlier task in this sequence already failed.
        if (sequence->status == PlasmaSuccess) {
            int info = plasma_core_dlauum(uplo, n, A, lda);
            if (info != PlasmaSuccess) {
                // Record the failure on the sequence/request instead of
                // returning an error code (task has no return channel).
                plasma_coreblas_error("core_dlauum() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
NeighborhoodGraph.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_NG_H_
#define _SPTAG_COMMON_NG_H_
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "Dataset.h"
#include "FineGrainedLock.h"
#include "QueryResultSet.h"
#include <chrono>
#if defined(GPU)
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <typeinfo>
#include <cuda_fp16.h>
#include "inc/Core/Common/cuda/KNN.hxx"
#include "inc/Core/Common/cuda/params.h"
#endif
namespace SPTAG
{
namespace COMMON
{
// Approximate neighborhood-graph builder shared by SPTAG index types.
// Builds an initial KNN graph — on CPU via repeated TP-tree (random
// projection tree) partitions, or on GPU via the CUDA buildGraph kernel —
// and then refines it into an RNG-style graph with RefineNode/RebuildNeighbors
// (the latter two are supplied by derived classes).
class NeighborhoodGraph
{
public:
    // Defaults for all tuning knobs; callers typically override them from
    // index parameters before BuildGraph is invoked.
    NeighborhoodGraph(): m_iTPTNumber(32),
        m_iTPTLeafSize(2000),
        m_iSamples(1000),
        m_numTopDimensionTPTSplit(5),
        m_iNeighborhoodSize(32),
        m_fNeighborhoodScale(2.0),
        m_fCEFScale(2.0),
        m_fRNGFactor(1.0),
        m_iRefineIter(2),
        m_iCEF(1000),
        m_iAddCEF(500),
        m_iMaxCheckForRefineGraph(10000),
        m_iGPUGraphType(2),
        m_iGPURefineSteps(0),
        m_iGPURefineDepth(2),
        m_iGPULeafSize(500),
        m_iheadNumGPUs(1),
        m_iTPTBalanceFactor(2)
    {}

    ~NeighborhoodGraph() {}

    // Insert a single new neighbor candidate into node's adjacency list.
    virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0;

    // Rebuild node's adjacency list from a sorted candidate result set.
    virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0;

    // Estimates graph recall by brute-forcing exact neighbors for `samples`
    // random nodes and counting how many appear in the stored adjacency
    // lists. Returns the average fraction of matches.
    virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        DimensionType* correct = new DimensionType[samples];

        #pragma omp parallel for schedule(dynamic)
        for (SizeType i = 0; i < samples; i++)
        {
            SizeType x = COMMON::Utils::rand(m_iGraphSize);
            //int x = i;
            COMMON::QueryResultSet<void> query(nullptr, m_iCEF);
            // Exhaustive scan: distance from sample x to every other node
            // (nodes present in idmap are skipped).
            for (SizeType y = 0; y < m_iGraphSize; y++)
            {
                if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue;
                float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y));
                query.AddPoint(y, dist);
            }
            query.SortResult();
            SizeType * exact_rng = new SizeType[m_iNeighborhoodSize];
            RebuildNeighbors(index, x, exact_rng, query.GetResults(), m_iCEF);

            correct[i] = 0;
            for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) {
                if (exact_rng[j] == -1) {
                    // -1 terminates the exact list; remaining slots count as
                    // trivially "correct".
                    correct[i] += m_iNeighborhoodSize - j;
                    break;
                }
                for (DimensionType k = 0; k < m_iNeighborhoodSize; k++)
                    if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) {
                        correct[i]++;
                        break;
                    }
            }
            delete[] exact_rng;
        }

        float acc = 0;
        for (SizeType i = 0; i < samples; i++) acc += float(correct[i]);
        acc = acc / samples / m_iNeighborhoodSize;
        delete[] correct;
        return acc;
    }

#if defined(GPU)
    // GPU path: delegates the whole KNN+RNG construction to the CUDA
    // buildGraph kernel, then remaps node ids through idmap if provided.
    template <typename T>
    void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap)
    {
        SizeType initSize;
        SPTAG::Helper::Convert::ConvertStringTo(index->GetParameter("NumberOfInitialDynamicPivots").c_str(), initSize);

        // Build the entire RNG graph, both builds the KNN and refines it to RNG
        buildGraph<T>(index, m_iGraphSize, m_iNeighborhoodSize, m_iTPTNumber, (int*)m_pNeighborhoodGraph[0], m_iGPURefineSteps, m_iGPURefineDepth, m_iGPUGraphType, m_iGPULeafSize, initSize, m_iheadNumGPUs, m_iTPTBalanceFactor);

        if (idmap != nullptr) {
            std::unordered_map<SizeType, SizeType>::const_iterator iter;
            for (SizeType i = 0; i < m_iGraphSize; i++) {
                for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) {
                    if ((iter = idmap->find(m_pNeighborhoodGraph[i][j])) != idmap->end())
                        m_pNeighborhoodGraph[i][j] = iter->second;
                }
            }
        }
    }
#else
    // Recursively partitions indices[first..last] by random hyperplanes over
    // the m_numTopDimensionTPTSplit highest-variance dimensions, emitting
    // (first,last) leaf ranges once a partition shrinks to m_iTPTLeafSize.
    template <typename T>
    void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last,
                           std::vector<std::pair<SizeType, SizeType>> & leaves)
    {
        if (last - first <= m_iTPTLeafSize)
        {
            leaves.emplace_back(first, last);
        }
        else
        {
            std::vector<float> Mean(index->GetFeatureDim(), 0);

            int iIteration = 100;
            // Statistics are computed on at most m_iSamples points.
            SizeType end = min(first + m_iSamples, last);
            SizeType count = end - first + 1;
            // calculate the mean of each dimension
            for (SizeType j = first; j <= end; j++)
            {
                const T* v = (const T*)index->GetSample(indices[j]);
                for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                {
                    Mean[k] += v[k];
                }
            }
            for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
            {
                Mean[k] /= count;
            }
            std::vector<BasicResult> Variance;
            Variance.reserve(index->GetFeatureDim());
            for (DimensionType j = 0; j < index->GetFeatureDim(); j++)
            {
                Variance.emplace_back(j, 0.0f);
            }
            // calculate the variance of each dimension
            for (SizeType j = first; j <= end; j++)
            {
                const T* v = (const T*)index->GetSample(indices[j]);
                for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                {
                    float dist = v[k] - Mean[k];
                    Variance[k].Dist += dist*dist;
                }
            }
            std::sort(Variance.begin(), Variance.end(), COMMON::Compare);
            // Work with the top-variance dimensions only; search for a random
            // weight vector over them that maximizes projected variance.
            std::vector<SizeType> indexs(m_numTopDimensionTPTSplit);
            std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit);
            float bestvariance = Variance[index->GetFeatureDim() - 1].Dist;
            for (int i = 0; i < m_numTopDimensionTPTSplit; i++)
            {
                indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID;
                bestweight[i] = 0;
            }
            bestweight[0] = 1;
            float bestmean = Mean[indexs[0]];

            std::vector<float> Val(count);
            for (int i = 0; i < iIteration; i++)
            {
                float sumweight = 0;
                // Draw a random direction in [-1,1]^k and normalize it.
                for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                {
                    weight[j] = float(rand() % 10000) / 5000.0f - 1.0f;
                    sumweight += weight[j] * weight[j];
                }
                sumweight = sqrt(sumweight);
                for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                {
                    weight[j] /= sumweight;
                }
                float mean = 0;
                for (SizeType j = 0; j < count; j++)
                {
                    Val[j] = 0;
                    const T* v = (const T*)index->GetSample(indices[first + j]);
                    for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                    {
                        Val[j] += weight[k] * v[indexs[k]];
                    }
                    mean += Val[j];
                }
                mean /= count;
                float var = 0;
                for (SizeType j = 0; j < count; j++)
                {
                    float dist = Val[j] - mean;
                    var += dist * dist;
                }
                if (var > bestvariance)
                {
                    bestvariance = var;
                    bestmean = mean;
                    for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                    {
                        bestweight[j] = weight[j];
                    }
                }
            }
            SizeType i = first;
            SizeType j = last;
            // decide which child one point belongs
            // Hoare-style partition: projections below bestmean go left.
            while (i <= j)
            {
                float val = 0;
                const T* v = (const T*)index->GetSample(indices[i]);
                for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                {
                    val += bestweight[k] * v[indexs[k]];
                }
                if (val < bestmean)
                {
                    i++;
                }
                else
                {
                    std::swap(indices[i], indices[j]);
                    j--;
                }
            }
            // if all the points in the node are equal,equally split the node into 2
            if ((i == first) || (i == last + 1))
            {
                i = (first + last + 1) / 2;
            }

            Mean.clear();
            Variance.clear();
            Val.clear();
            indexs.clear();
            weight.clear();
            bestweight.clear();

            PartitionByTptree<T>(index, indices, first, i - 1, leaves);
            PartitionByTptree<T>(index, indices, i, last, leaves);
        }
    }

    // CPU path: builds m_iTPTNumber TP-trees over shuffled point orders and,
    // within every leaf, brute-forces all pairwise distances, merging the
    // best candidates into each node's neighbor list.
    template <typename T>
    void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap)
    {
        COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);
        std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize));
        std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>());

        for (SizeType i = 0; i < m_iGraphSize; i++)
            for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                (NeighborhoodDists)[i][j] = MaxDist;

        auto t1 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition begin\n");
        #pragma omp parallel for schedule(dynamic)
        for (int i = 0; i < m_iTPTNumber; i++)
        {
            // Stagger start + reseed so each tree sees a different random
            // stream. NOTE(review): rand()/srand() are per-process, so
            // trees still share the generator across threads — confirm this
            // is acceptable nondeterminism.
            Sleep(i * 100); std::srand(clock());
            for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j;
            std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end());
            PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]);
            LOG(Helper::LogLevel::LL_Info, "Finish Getting Leaves for Tree %d\n", i);
        }
        LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition done\n");
        auto t2 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "Build TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count());

        for (int i = 0; i < m_iTPTNumber; i++)
        {
            #pragma omp parallel for schedule(dynamic)
            for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++)
            {
                SizeType start_index = TptreeLeafNodes[i][j].first;
                SizeType end_index = TptreeLeafNodes[i][j].second;
                if ((j * 5) % TptreeLeafNodes[i].size() == 0) LOG(Helper::LogLevel::LL_Info, "Processing Tree %d %d%%\n", i, static_cast<int>(j * 1.0 / TptreeLeafNodes[i].size() * 100));
                // All-pairs distances inside one leaf; each pair feeds both
                // endpoints' candidate neighbor lists.
                for (SizeType x = start_index; x < end_index; x++)
                {
                    for (SizeType y = x + 1; y <= end_index; y++)
                    {
                        SizeType p1 = TptreeDataIndices[i][x];
                        SizeType p2 = TptreeDataIndices[i][y];
                        float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2));
                        if (idmap != nullptr) {
                            p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1);
                            p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2);
                        }
                        COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize);
                        COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize);
                    }
                }
            }
            TptreeDataIndices[i].clear();
            TptreeLeafNodes[i].clear();
        }
        TptreeDataIndices.clear();
        TptreeLeafNodes.clear();

        auto t3 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "Process TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count());
    }
#endif

    // Full build: allocate the graph (temporarily scaled neighborhood size),
    // construct the initial KNN graph (skipped for tiny graphs), refine it,
    // and finally apply idmap's negative-key relabeling convention.
    template <typename T>
    void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        LOG(Helper::LogLevel::LL_Info, "build RNG graph!\n");

        m_iGraphSize = index->GetNumSamples();
        // Build with a wider neighborhood; RefineGraph shrinks it back.
        m_iNeighborhoodSize = (DimensionType)(ceil(m_iNeighborhoodSize * m_fNeighborhoodScale));
        m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);

        if (m_iGraphSize < 1000) {
            // Small graphs: refinement alone (brute search) is cheap enough.
            RefineGraph<T>(index, idmap);
            LOG(Helper::LogLevel::LL_Info, "Build RNG Graph end!\n");
            return;
        }

        auto t1 = std::chrono::high_resolution_clock::now();
        BuildInitKNNGraph<T>(index, idmap);
        auto t2 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "BuildInitKNNGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count());

        RefineGraph<T>(index, idmap);

        if (idmap != nullptr) {
            // Negative idmap keys encode crosslink targets stored in the last
            // neighbor slot (see RefineGraph's -1 - i / -2 - x convention).
            for (auto iter = idmap->begin(); iter != idmap->end(); iter++)
                if (iter->first < 0)
                {
                    m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second;
                }
        }
        auto t3 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "BuildGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t1).count());
    }

    // In-place refinement: m_iRefineIter-1 passes at the scaled neighborhood
    // size / CEF, then one final pass at the target size.
    template <typename T>
    void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        for (int iter = 0; iter < m_iRefineIter - 1; iter++)
        {
            auto t1 = std::chrono::high_resolution_clock::now();
            #pragma omp parallel for schedule(dynamic)
            for (SizeType i = 0; i < m_iGraphSize; i++)
            {
                RefineNode<T>(index, i, false, false, (int)(m_iCEF * m_fCEFScale));
                if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", iter, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
            }
            auto t2 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
        }

        // Restore the target neighborhood size for the final pass.
        m_iNeighborhoodSize = (DimensionType)(m_iNeighborhoodSize / m_fNeighborhoodScale);

        if (m_iRefineIter > 0) {
            auto t1 = std::chrono::high_resolution_clock::now();
            #pragma omp parallel for schedule(dynamic)
            for (SizeType i = 0; i < m_iGraphSize; i++)
            {
                RefineNode<T>(index, i, false, false, m_iCEF);
                if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", m_iRefineIter - 1, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
            }
            auto t2 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
        }
        else {
            LOG(Helper::LogLevel::LL_Info, "Graph Acc: %f\n", GraphAccuracyEstimation(index, 100, idmap));
        }
    }

    // Refinement into a new (sub)graph over `indices`, translating neighbor
    // ids back through reverseIndices/idmap, optionally saving to `output`.
    template <typename T>
    ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices,
                          std::shared_ptr<Helper::DiskPriorityIO> output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        std::shared_ptr<NeighborhoodGraph> tmp;
        if (newGraph == nullptr) {
            // Caller did not supply a destination: build a temporary of the
            // same concrete type.
            tmp = NeighborhoodGraph::CreateInstance(Type());
            newGraph = tmp.get();
        }

        SizeType R = (SizeType)indices.size();
        newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);
        newGraph->m_iGraphSize = R;
        newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize;

        #pragma omp parallel for schedule(dynamic)
        for (SizeType i = 0; i < R; i++)
        {
            if ((i * 5) % R == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d%%\n", static_cast<int>(i * 1.0 / R * 100));
            SizeType* outnodes = newGraph->m_pNeighborhoodGraph[i];

            COMMON::QueryResultSet<T> query((const T*)index->GetSample(indices[i]), m_iCEF + 1);
            index->RefineSearchIndex(query, false);
            RebuildNeighbors(index, indices[i], outnodes, query.GetResults(), m_iCEF + 1);

            std::unordered_map<SizeType, SizeType>::const_iterator iter;
            for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
            {
                // Map old ids to the new graph's ids, then apply idmap.
                if (outnodes[j] >= 0 && outnodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[outnodes[j]];
                if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second;
            }
            // Negative-key convention: -1 - i marks a crosslink stored in the
            // last slot as -2 - target.
            if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end())
                outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second;
        }

        if (output != nullptr) newGraph->SaveGraph(output);
        return ErrorCode::Success;
    }

    // Recomputes one node's neighbors from a fresh index search; when
    // updateNeighbors is set, also pushes the node into its neighbors' lists.
    template <typename T>
    void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted, int CEF)
    {
        COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), CEF + 1);
        index->RefineSearchIndex(query, searchDeleted);
        RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), CEF + 1);

        if (updateNeighbors) {
            // update neighbors
            for (int j = 0; j <= CEF; j++)
            {
                BasicResult* item = query.GetResult(j);
                if (item->VID < 0) break;
                if (item->VID == node) continue;

                InsertNeighbors(index, item->VID, node, item->Dist);
            }
        }
    }

    // Bytes needed to serialize the adjacency matrix.
    inline std::uint64_t BufferSize() const
    {
        return m_pNeighborhoodGraph.BufferSize();
    }

    // Load overloads (stream / file path / in-memory buffer); all refresh
    // the cached size fields from the loaded dataset.
    ErrorCode LoadGraph(std::shared_ptr<Helper::DiskPriorityIO> input, SizeType blockSize, SizeType capacity)
    {
        ErrorCode ret = ErrorCode::Success;
        if ((ret = m_pNeighborhoodGraph.Load(input, blockSize, capacity)) != ErrorCode::Success) return ret;

        m_iGraphSize = m_pNeighborhoodGraph.R();
        m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
        return ret;
    }

    ErrorCode LoadGraph(std::string sGraphFilename, SizeType blockSize, SizeType capacity)
    {
        ErrorCode ret = ErrorCode::Success;
        if ((ret = m_pNeighborhoodGraph.Load(sGraphFilename, blockSize, capacity)) != ErrorCode::Success) return ret;

        m_iGraphSize = m_pNeighborhoodGraph.R();
        m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
        return ret;
    }

    ErrorCode LoadGraph(char* pGraphMemFile, SizeType blockSize, SizeType capacity)
    {
        ErrorCode ret = ErrorCode::Success;
        if ((ret = m_pNeighborhoodGraph.Load(pGraphMemFile, blockSize, capacity)) != ErrorCode::Success) return ret;

        m_iGraphSize = m_pNeighborhoodGraph.R();
        m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
        return ErrorCode::Success;
    }

    ErrorCode SaveGraph(std::string sGraphFilename) const
    {
        LOG(Helper::LogLevel::LL_Info, "Save %s To %s\n", m_pNeighborhoodGraph.Name().c_str(), sGraphFilename.c_str());
        auto ptr = f_createIO();
        if (ptr == nullptr || !ptr->Initialize(sGraphFilename.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile;
        return SaveGraph(ptr);
    }

    // Binary layout: graph size, neighborhood size, then one row of SizeType
    // neighbor ids per node.
    ErrorCode SaveGraph(std::shared_ptr<Helper::DiskPriorityIO> output) const
    {
        IOBINARY(output, WriteBinary, sizeof(SizeType), (char*)&m_iGraphSize);
        IOBINARY(output, WriteBinary, sizeof(DimensionType), (char*)&m_iNeighborhoodSize);

        for (int i = 0; i < m_iGraphSize; i++)
            IOBINARY(output, WriteBinary, sizeof(SizeType) * m_iNeighborhoodSize, (char*)m_pNeighborhoodGraph[i]);
        LOG(Helper::LogLevel::LL_Info, "Save %s (%d,%d) Finish!\n", m_pNeighborhoodGraph.Name().c_str(), m_iGraphSize, m_iNeighborhoodSize);
        return ErrorCode::Success;
    }

    // Grow the graph by `num` (uninitialized) rows.
    inline ErrorCode AddBatch(SizeType num)
    {
        ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num);
        if (ret != ErrorCode::Success) return ret;

        m_iGraphSize += num;
        return ErrorCode::Success;
    }

    inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; }

    inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; }

    // Thread-safe single-cell update guarded by the per-row lock.
    void Update(SizeType row, DimensionType col, SizeType val) {
        std::lock_guard<std::mutex> lock(m_dataUpdateLock[row]);
        m_pNeighborhoodGraph[row][col] = val;
    }

    inline void SetR(SizeType rows) {
        m_pNeighborhoodGraph.SetR(rows);
        m_iGraphSize = rows;
    }

    inline SizeType R() const { return m_iGraphSize; }

    inline std::string Type() const { return m_pNeighborhoodGraph.Name(); }

    // Factory for concrete graph implementations, keyed by type name.
    static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type);

protected:
    // Graph structure
    SizeType m_iGraphSize;
    COMMON::Dataset<SizeType> m_pNeighborhoodGraph;
    FineGrainedLock m_dataUpdateLock;  // per-row locks for Update()

public:
    int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit;
    DimensionType m_iNeighborhoodSize;
    float m_fNeighborhoodScale, m_fCEFScale, m_fRNGFactor;
    int m_iRefineIter, m_iCEF, m_iAddCEF, m_iMaxCheckForRefineGraph, m_iGPUGraphType, m_iGPURefineSteps, m_iGPURefineDepth, m_iGPULeafSize, m_iheadNumGPUs, m_iTPTBalanceFactor;
};
}
}
#endif
|
deconvolution_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Generic packn deconvolution (transposed convolution) for RISC-V Vector.
// Formulated as a gather: for each output pixel, accumulate contributions
// from the input pixels whose stride-expanded kernel footprint covers it,
// so no scatter/atomic writes are needed.
static void deconvolution_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packn, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 4;          // lanes of fp32 per vector register
    const word_type vl = vsetvl_e32m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel extent once dilation is applied.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                if (bias_data_ptr)
                {
                    _sum = vle32_v_f32m1(bias_data_ptr + p * packn, vl);
                }

                const float* kptr = (const float*)weight_data_packn.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Map output row i back to the input row that would
                        // have produced it through kernel tap y; skip taps
                        // that fall between strides or outside the input.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same inverse mapping for the column.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const float* sptr = m.row(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            // packn x packn weight sub-block: broadcast each
                            // input lane against one weight column.
                            for (int l = 0; l < packn; l++)
                            {
                                float val = *sptr++;
                                vfloat32m1_t _w0 = vle32_v_f32m1(kptr + k * packn * packn + packn * l, vl);
                                _sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
                            }
                        }
                    }

                    kptr += maxk * packn * packn;
                }

                // Fused activation (relu/leaky/clip/... per activation_type).
                _sum = activation_ps(_sum, activation_type, activation_params, vl);

                vse32_v_f32m1(outptr + j * packn, _sum, vl);
            }

            outptr += outw * packn;
        }
    }
}
|
pooling_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 2x2 max pooling with stride 2. The NEON paths process 4 output pixels
// (8 input columns from two rows) per iteration using pairwise-max; the
// scalar tail handles the remaining columns.
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After consuming 2*outw input columns, step both row pointers past the
    // remainder of the current row AND the next row (stride 2 vertically).
    const int tailstep = w - 2 * outw + w;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // Two adjacent input rows feed one output row.
        const float* r0 = img0;
        const float* r1 = img0 + w;

        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 2;              // 4-output-wide vector iterations
            int remain = outw - (nn << 2);   // scalar leftovers
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                // fmax merges the two rows vertically; fmaxp reduces
                // horizontal 2x1 pairs into the 4 pooled outputs.
                asm volatile(
                    "0:                                   \n"
                    "prfm       pldl1keep, [%1, #256]     \n"
                    "prfm       pldl1keep, [%2, #256]     \n"
                    "ld1        {v0.4s, v1.4s}, [%1], #32 \n"
                    "ld1        {v2.4s, v3.4s}, [%2], #32 \n"
                    "fmax       v0.4s, v0.4s, v2.4s       \n"
                    "fmax       v1.4s, v1.4s, v3.4s       \n"
                    "fmaxp      v2.4s, v0.4s, v1.4s       \n"
                    "subs       %w0, %w0, #1              \n"
                    "st1        {v2.4s}, [%3], #16        \n"
                    "bne        0b                        \n"
                    : "=r"(nn),     // %0
                    "=r"(r0),     // %1
                    "=r"(r1),     // %2
                    "=r"(outptr)  // %3
                    : "0"(nn),
                    "1"(r0),
                    "2"(r1),
                    "3"(outptr)
                    : "cc", "memory", "v0", "v1", "v2", "v3");
            }
#else
            if (nn > 0)
            {
                // armv7 variant: vmax for the vertical merge, vpmax for the
                // horizontal pairwise reduction.
                asm volatile(
                    "0:                             \n"
                    "pld        [%1, #256]          \n"
                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d0-d3}, [%1]!      \n"
                    "vld1.f32   {d4-d7}, [%2]!      \n"
                    "vmax.f32   q0, q0, q2          \n"
                    "vmax.f32   q1, q1, q3          \n"
                    "vpmax.f32  d4, d0, d1          \n"
                    "vpmax.f32  d5, d2, d3          \n"
                    "subs       %0, #1              \n"
                    "vst1.f32   {d4-d5}, [%3]!      \n"
                    "bne        0b                  \n"
                    : "=r"(nn),     // %0
                    "=r"(r0),     // %1
                    "=r"(r1),     // %2
                    "=r"(outptr)  // %3
                    : "0"(nn),
                    "1"(r0),
                    "2"(r1),
                    "3"(outptr)
                    : "cc", "memory", "q0", "q1", "q2", "q3");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: max over the 2x2 window.
            for (; remain > 0; remain--)
            {
                float max0 = std::max(r0[0], r0[1]);
                float max1 = std::max(r1[0], r1[1]);

                *outptr = std::max(max0, max1);

                r0 += 2;
                r1 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
|
ten_tusscher_2004_epi_S1_3.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S1_3.h"
// Reports the cell model's metadata — initial transmembrane potential and
// number of ODE state variables — filling only the fields the caller
// requested via the get_* flags.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initializes one cell's NEQ-entry state vector with pre-computed
// steady-state values (the commented block documents the original Ten
// Tusscher defaults and each variable's meaning).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    /*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;           //M
    sv[2] = 0.75;          //H
    sv[3] = 0.75f;         //J
    sv[4] = 0.f;           //Xr1
    sv[5] = 1.f;           //Xr2
    sv[6] = 0.f;           //Xs
    sv[7] = 1.f;           //S
    sv[8] = 0.f;           //R
    sv[9] = 0.f;           //D
    sv[10] = 1.f;          //F
    sv[11] = 1.f;          //FCa
    sv[12] = 1.f;          //G
    sv[13] = 0.0002;       //Cai
    sv[14] = 0.2f;         //CaSR
    sv[15] = 11.6f;        //Nai
    sv[16] = 138.3f;       //Ki
    */
    // Elnaz's steady-state initial conditions
    // (same variable order as the defaults above)
    real sv_sst[]={-86.7781728901090,0.00123349870343949,0.784809889318744,0.784547392738085,0.000169596490364688,0.487274781980815,0.00289668567203959,0.999998415889729,1.86706803556251e-08,1.83887682327320e-05,0.999777287266349,1.00756607610598,0.999999160062542,3.39867729896090e-05,0.592251587252171,9.37662819093271,140.159936788276};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}
// Advance `num_steps` time steps of size dt for every requested cell, in
// parallel over cells. `cells_to_solve`, when non-NULL, maps the dense loop
// index to the cell's position in the global state array `sv`; otherwise the
// loop index is used directly.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
// sv_id must be private: it is written by every thread. The loop index i is
// implicitly private. NOTE(review): i is a signed int while
// num_cells_to_solve is presumably unsigned — confirm no cell count exceeds
// INT_MAX and no sign-compare warning matters here.
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
// The same stimulus value stim_currents[i] is applied on every sub-step.
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Perform one dt-sized update of a single cell: snapshot the current state,
// evaluate the model step (RHS_cpu returns next-step values, not derivatives),
// then commit the result back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real state_now[NEQ];
real state_next[NEQ];
for(int k = 0; k < NEQ; k++)
state_now[k] = sv[k];
RHS_cpu(state_now, state_next, stim_current, dt);
for(int k = 0; k < NEQ; k++)
sv[k] = state_next[k];
}
// One time step of the ten Tusscher 2004 epicardial ventricular cell model.
// Despite the parameter name, rDY_ receives the UPDATED state after a step of
// size dt, not time derivatives: gates use Rush-Larsen exponential
// integration, concentrations are updated with buffered analytic formulas,
// and the membrane voltage uses forward Euler (see rDY_[0] at the bottom).
// sv: current state (17 entries, see unpacking below); stim_current: external
// stimulus added to the total membrane current.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Tuned per-variant parameter set: the assignments below override the
// textbook default conductances declared above for this S1_3 variant.
real parameters []={13.8994716023310,0.000314898731878021,0.000156213524980972,0.000500074781915997,0.266864980659979,0.210551078794501,0.0657802089893208,2.85046969353601,0.0146506603832578,2.33945156719839,1099.72957852790,0.000431840298681176,0.479647775253583,0.0184750516443378,0.00580287376612870,1.67786611618970e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
// Membrane currents, fluxes and intermediate quantities (computed below).
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Precomputed Rush-Larsen decay factors for the FCa and G gates.
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
// Nernst/reversal potentials and rectification factors.
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
// Calcium subsystem: SR release (Irel), leak, SERCA uptake; Cai and CaSR are
// then solved analytically through the rapid-buffering quadratic below.
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
// Sodium and potassium: forward Euler on the ionic fluxes.
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// Transient-outward gate kinetics are cell-type specific; this file is the
// epicardial variant (EPI expected to be defined at compile time).
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Rush-Larsen: x_new = x_inf - (x_inf - x_old) * exp(-dt / tau_x).
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G may only move toward their steady state when the membrane is
// sufficiently repolarized; above -37 mV an increase is rejected.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
// Forward Euler on V; note rDY_ carries next-step values, not derivatives.
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
msl_raptor_backend.h | /*
Copyright 2020 Benjamin Ramtoula
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#pragma once
#include "UKF.h"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <numeric>
#include <opencv2/core/eigen.hpp>
#include <vector>
#include <iostream>
#include "omp.h"
#include <tuple>
#include <math.h>
// #include <functional>
#define COS_45 0.52532198881
// -------------------------------------------------------------------
// ----------------------- START PARAMETER STRUCTS -------------------
// -------------------------------------------------------------------
namespace msl_raptor_backend
{
typedef Eigen::Matrix<double, 7, 1> PoseVec;
typedef Eigen::Matrix<double, 6, 1> GradVec;
struct CameraParams
{
cv::Mat dist_coeffs;
cv::Mat K;
Eigen::Matrix3d K_inv;
cv::Mat rvec;
cv::Mat tvec;
Eigen::Affine3d tf_ego_cam;
Eigen::Affine3f tf_cam_ego;
void init(const float ppx,
const float ppy,
const float fx,
const float fy,
const Eigen::Affine3f &extrinsics_in,
const cv::Mat &dist_coeffs_in)
{
cv::Mat cam_matrix = cv::Mat::eye(3, 3, CV_32F);
cam_matrix.at<float>(0, 0) = fx;
cam_matrix.at<float>(1, 1) = fy;
cam_matrix.at<float>(0, 2) = ppx;
cam_matrix.at<float>(1, 2) = ppy;
init(cam_matrix, extrinsics_in, dist_coeffs_in);
}
void init(const std::vector<float> &camera_matrix_in,
const std::vector<float> &rvec_in,
const std::vector<float> &tvec_in,
const std::vector<float> &dist_coeffs_in)
{
cv::Mat camera_matrix = cv::Mat(camera_matrix_in, true).reshape(1, 3);
cv::Mat rvec = cv::Mat(rvec_in, true);
cv::Mat tvec = cv::Mat(tvec_in, true);
cv::Mat dist_coeffs = cv::Mat(dist_coeffs_in, true);
init(camera_matrix, rvec, tvec, dist_coeffs);
}
void init(const cv::Mat &camera_matrix_in,
const Eigen::Affine3f &extrinsics,
const cv::Mat &dist_coeffs_in)
{
cv::Mat rvec(1, 3, CV_32F);
cv::Mat tvec(1, 3, CV_32F);
cv::Mat rmat(3, 3, CV_32F);
Eigen::MatrixXf rmat_eigen = extrinsics.matrix().topLeftCorner<3, 3>();
Eigen::MatrixXf tvec_eigen = extrinsics.matrix().topRightCorner<3, 1>();
cv::eigen2cv(rmat_eigen, rmat);
cv::eigen2cv(tvec_eigen, tvec);
cv::Rodrigues(rmat, rvec);
init(camera_matrix_in, rvec, tvec, dist_coeffs_in);
}
void init(const cv::Mat &camera_matrix_in,
const cv::Mat &rvec_in,
const cv::Mat &tvec_in,
const cv::Mat &dist_coeffs_in)
{
K = camera_matrix_in.clone();
Eigen::Matrix3f K_eigen;
cv::cv2eigen(K, K_eigen);
K_inv = K_eigen.cast<double>().inverse();
rvec = rvec_in.clone();
tvec = tvec_in.clone();
cv::Mat rot_mat;
cv::Rodrigues(rvec, rot_mat);
Eigen::Matrix3f rot_mat_eigen;
cv::cv2eigen(rot_mat, rot_mat_eigen);
// ;
tf_ego_cam = Eigen::Translation3d(double(tvec.at<float>(0)),
double(tvec.at<float>(1)),
double(tvec.at<float>(2))) *
Eigen::AngleAxisd(rot_mat_eigen.cast<double>());
tf_cam_ego = tf_ego_cam.inverse().cast<float>();
dist_coeffs = dist_coeffs_in.clone();
}
void updateIntrinsics(const float ppx,
const float ppy,
const float fx,
const float fy,
const cv::Mat &dist_coeffs_in)
{
cv::Mat cam_matrix = cv::Mat::eye(3, 3, CV_32F);
cam_matrix.at<float>(0, 0) = fx;
cam_matrix.at<float>(1, 1) = fy;
cam_matrix.at<float>(0, 2) = ppx;
cam_matrix.at<float>(1, 2) = ppy;
updateIntrinsics(cam_matrix, dist_coeffs);
}
void updateIntrinsics(const std::vector<float> &camera_matrix_in,
const std::vector<float> &dist_coeffs_in)
{
cv::Mat camera_matrix = cv::Mat(camera_matrix_in, true).reshape(1, 3);
cv::Mat dist_coeffs = cv::Mat(dist_coeffs_in, true);
updateIntrinsics(camera_matrix, dist_coeffs);
}
void updateIntrinsics(const cv::Mat &camera_matrix_in,
const cv::Mat &dist_coeffs_in)
{
K = camera_matrix_in.clone();
Eigen::Matrix3f K_eigen;
cv::cv2eigen(K, K_eigen);
K_inv = K_eigen.cast<double>().inverse();
dist_coeffs = dist_coeffs_in.clone();
}
void updateExtrinsics(const Eigen::Affine3d &extrinsics)
{
cv::Mat rmat(3, 3, CV_32F);
Eigen::MatrixXf rmat_eigen =
extrinsics.matrix().topLeftCorner<3, 3>().cast<float>();
Eigen::MatrixXf tvec_eigen =
extrinsics.matrix().topRightCorner<3, 1>().cast<float>();
cv::eigen2cv(rmat_eigen, rmat);
cv::eigen2cv(tvec_eigen, tvec);
cv::Rodrigues(rmat, rvec);
tf_ego_cam = extrinsics;
tf_cam_ego = tf_ego_cam.inverse().cast<float>();
}
void updateExtrinsics(const cv::Mat &rvec_in, const cv::Mat &tvec_in)
{
cv::Mat rot_mat;
rvec = rvec_in;
tvec = tvec_in;
cv::Rodrigues(rvec, rot_mat);
Eigen::Matrix3f rot_mat_eigen;
cv::cv2eigen(rot_mat, rot_mat_eigen);
// ;
tf_ego_cam = Eigen::Translation3d(double(tvec.at<float>(0)),
double(tvec.at<float>(1)),
double(tvec.at<float>(2))) *
Eigen::AngleAxisd(rot_mat_eigen.cast<double>());
tf_cam_ego = tf_ego_cam.inverse().cast<float>();
}
void updateExtrinsics(const std::vector<float> &rvec_in,
const std::vector<float> &tvec_in)
{
cv::Mat rvec = cv::Mat(rvec_in, true);
cv::Mat tvec = cv::Mat(tvec_in, true);
updateExtrinsics(rvec, tvec);
}
CameraParams(){};
CameraParams(const float ppx,
const float ppy,
const float fx,
const float fy,
const Eigen::Affine3f &extrinsics,
const cv::Mat &dist_coeffs_in)
{
init(ppx, ppy, fx, fy, extrinsics, dist_coeffs_in);
}
CameraParams(const cv::Mat &camera_matrix_in,
const Eigen::Affine3f &extrinsics,
const cv::Mat &dist_coeffs_in)
{
init(camera_matrix_in, extrinsics, dist_coeffs_in);
}
CameraParams(const std::vector<float> &camera_matrix_in,
const std::vector<float> &rvec_in,
const std::vector<float> &tvec_in,
const std::vector<float> &dist_coeffs_in)
{
init(camera_matrix_in, rvec_in, tvec_in, dist_coeffs_in);
}
};
struct ObjPoseInitParams
{
// For optimising the initial bounding box. One step size and learning rate per position (3) and quaternion value (4). Variable number of initial pose depths.
// Finite-difference step size per optimized degree of freedom (6 total:
// 3 translation, 3 rotation).
GradVec grad_comp_step_sizes;
// Per-DoF learning rates for the gradient updates.
GradVec lr;
// Eigen::MatrixXd init_pose_guesses;
// Candidate starting poses; the optimizer is run from each one.
std::vector<PoseVec> init_pose_guesses;
double momentum; // momentum coefficient for the gradient filter
int max_steps; // hard iteration cap
int conv_steps; // iterations with unchanged error to declare convergence
int period_lower_lr; // halve the learning rate every this many iterations
// Default ctor leaves scalars uninitialized — callers are expected to use
// the full constructor below.
ObjPoseInitParams(){};
// NOTE(review): the Map calls assume the input vectors have exactly 6
// entries (GradVec size) — confirm at call sites.
ObjPoseInitParams(
const std::vector<double> &grad_comp_step_sizes_in,
const std::vector<double> &lr_in,
const std::vector<PoseVec> &init_pose_guesses_in,
const double momentum_in,
const int max_steps_in,
const int conv_steps_in,
const int period_lower_lr_in)
{
grad_comp_step_sizes = GradVec::Map(
grad_comp_step_sizes_in.data(), grad_comp_step_sizes_in.size());
lr = GradVec::Map(lr_in.data(), lr_in.size());
momentum = momentum_in;
max_steps = max_steps_in;
conv_steps = conv_steps_in;
period_lower_lr = period_lower_lr_in;
init_pose_guesses = init_pose_guesses_in;
}
};
// Object-specific parameters, contains UKF parameters and properties associated to one object to track:
// Initial UKF state covariance, measurement noise, process noise, object dimensions or approximate point cloud.
struct ObjParams
{
// Object model points in the object (ado) frame, one point per row (x,y,z).
Eigen::MatrixXf model_points_ado;
// Same points, homogeneous (extra 1s column) and transposed: 4 x N, ready
// to be multiplied by a 4x4 transform.
Eigen::MatrixXf model_points_ado_aug_t;
// Bounding extents along x (width), y (height), z (length), derived from
// the model points in init().
float width;
float height;
float length;
float aspect_ratio; // width / height
// Diagonals for the UKF covariances.
Eigen::VectorXd state_cov_diags;
Eigen::VectorXd process_noise_cov_diags;
Eigen::VectorXd measure_noise_cov_diags;
ObjPoseInitParams obj_pose_init_params;
ObjParams(){};
// Construct from an explicit point cloud.
ObjParams(const Eigen::MatrixXf &model_points_ado_in,
const std::vector<double> &state_cov_diags_in,
const std::vector<double> &process_noise_cov_diags_in,
const std::vector<double> &measure_noise_cov_diags_in,
const ObjPoseInitParams &obj_pose_init_params_in)
{
init(model_points_ado_in,
state_cov_diags_in,
process_noise_cov_diags_in,
measure_noise_cov_diags_in,
obj_pose_init_params_in);
}
// Construct from box dimensions: the model becomes the 8 corners of an
// axis-aligned box centered at the origin.
ObjParams(float width,
float height,
float length,
const std::vector<double> &state_cov_diags_in,
const std::vector<double> &process_noise_cov_diags_in,
const std::vector<double> &measure_noise_cov_diags_in,
const ObjPoseInitParams &obj_pose_init_params_in)
{
float half_height = height / 2;
float half_width = width / 2;
float half_length = length / 2;
Eigen::Matrix<float, 8, 3> model_points;
model_points << half_width, half_height, half_length, //
half_width, half_height, -half_length, //
half_width, -half_height, -half_length, //
half_width, -half_height, half_length, //
-half_width, -half_height, half_length, //
-half_width, -half_height, -half_length, //
-half_width, half_height, -half_length, //
-half_width, half_height, half_length; //
init(model_points,
state_cov_diags_in,
process_noise_cov_diags_in,
measure_noise_cov_diags_in,
obj_pose_init_params_in);
}
// Derive extents and the augmented/transposed point matrix, and map the
// covariance diagonals into Eigen vectors.
void init(const Eigen::MatrixXf &model_points_ado_in,
const std::vector<double> &state_cov_diags_in,
const std::vector<double> &process_noise_cov_diags_in,
const std::vector<double> &measure_noise_cov_diags_in,
const ObjPoseInitParams &obj_pose_init_params_in)
{
model_points_ado = model_points_ado_in;
width =
model_points_ado.col(0).maxCoeff() - model_points_ado.col(0).minCoeff();
height =
model_points_ado.col(1).maxCoeff() - model_points_ado.col(1).minCoeff();
length =
model_points_ado.col(2).maxCoeff() - model_points_ado.col(2).minCoeff();
aspect_ratio = width / height;
// Append a column of ones (homogeneous coordinate), then transpose so
// points are columns.
model_points_ado_aug_t = model_points_ado;
model_points_ado_aug_t.conservativeResize(
model_points_ado_aug_t.rows(), model_points_ado_aug_t.cols() + 1);
model_points_ado_aug_t.col(model_points_ado_aug_t.cols() - 1) =
Eigen::VectorXf::Ones(model_points_ado_aug_t.rows());
model_points_ado_aug_t.transposeInPlace();
state_cov_diags = Eigen::VectorXd::Map(state_cov_diags_in.data(),
state_cov_diags_in.size());
process_noise_cov_diags = Eigen::VectorXd::Map(
process_noise_cov_diags_in.data(), process_noise_cov_diags_in.size());
measure_noise_cov_diags = Eigen::VectorXd::Map(
measure_noise_cov_diags_in.data(), measure_noise_cov_diags_in.size());
obj_pose_init_params = obj_pose_init_params_in;
}
};
} // namespace msl_raptor_backend
// -----------------------------------------------------------------
// ----------------------- END PARAMETER STRUCTS -------------------
// -----------------------------------------------------------------
// ------------------------------------------------------------------------------------
// ----------------------- START MSL-RAPTOR Back-end UKF definition -------------------
// ------------------------------------------------------------------------------------
// The measurement can be an axis-aligned bounding box (4 parameters) or an angled one (5 parameters with the angle). The class is templated on this factor.
template <bool aligned_bb>
class MSLRaptorUKF
{
typedef typename msl_raptor_backend::PoseVec PoseVec;
typedef typename msl_raptor_backend::GradVec GradVec;
public:
/** define state vector: <scalars, 3-vectors, quaternions> */
typedef kalman::Vector<0, 3, 1> StateVec; // the layout is: (pos x 3, vel x 3,
// angularvel x 3, attitude x 4)
typedef Eigen::Affine3d InputType;
/** define measurement vector <scalars, 3-vectors, quaternions> */
typedef typename std::conditional<
/* */ aligned_bb,
/* y? */ kalman::Vector<4, 0, 0>,
/* n? */ kalman::Vector<5, 0, 0>>::type MeasureVec;
// Seed the UKF's square-root covariances from the object's configured
// diagonals. Each diagonal matrix is passed through an upper-triangular
// Cholesky factor because the filter stores root covariances.
static void init(kalman::UKF<MSLRaptorUKF, InputType> &ukf,
MSLRaptorUKF msl_raptor_ukf)
{
ukf.stateRootCov = msl_raptor_ukf.obj_params_.state_cov_diags.asDiagonal();
ukf.stateRootCov =
ukf.stateRootCov.llt()
.matrixU(); // note that we are using sqrt of actual cov matrix
ukf.measureNoiseRootCov =
msl_raptor_ukf.obj_params_.measure_noise_cov_diags.asDiagonal();
ukf.measureNoiseRootCov = ukf.measureNoiseRootCov.llt().matrixU();
ukf.processNoiseRootCov =
msl_raptor_ukf.obj_params_.process_noise_cov_diags.asDiagonal();
ukf.processNoiseRootCov =
ukf.processNoiseRootCov.llt()
.matrixU(); // note that we are using sqrt of actual cov matrix
ukf.ukfModel = msl_raptor_ukf;
}
// Process model derivative: constant-velocity motion. Position integrates
// velocity; attitude integrates angular velocity; the input u is unused here.
static StateVec dF(const StateVec &state, const InputType &u)
{
StateVec out = state;
/* differentiate the quaternions automatically.
* Second argument specifies start of angular velocity params in state
* vector */
kalman::util::diffQuaternion(out, 6);
/* differentiate position automatically.
* arguments are: (output state vector, start of position param, end of
* position param, beginning of velocity param) */
kalman::util::diffPosition(out, 0, 3, 3);
return out;
}
// Used if bounding box is angled
// Approximates a full UKF state (6DoF pose) from a 2D bounding box measurement. Relies on heuristics and assumptions.
template <bool is_aligned_bb = aligned_bb>
StateVec approxStateFromBbHeuristic(
typename std::enable_if<!is_aligned_bb, const MeasureVec &>::type bb)
{
std::vector<double> width_height;
// Vector containing bb width and height found from known object size and bb dimensions observed.
// Order (larger, smaller) to match the object's own width/height ordering.
// NOTE(review): only width_height[0] is used below.
width_height = (obj_params_.width > obj_params_.height) ? ((bb(2) > bb(3)) ? std::vector<double>{bb(2), bb(3)} : std::vector<double>{bb(3), bb(2)}) : ((bb(2) > bb(3)) ? std::vector<double>{bb(3), bb(2)} : std::vector<double>{bb(2), bb(3)});
Eigen::Vector3d bb_center;
bb_center << bb.segment(0, 2), 1;
// Get a guess approximate size of object visible in the image. Assume that
// the object is at the same height as camera. Average between size of the
// smallest object side and the object oriented 45 deg.
// NOTE(review): COS_45 is defined as 0.52532198881 == cos(45 radians);
// cos(45 degrees) would be ~0.7071 — confirm intended value.
float expected_average_width = 0.5 *
(COS_45 * (obj_params_.width + obj_params_.length) +
std::min(obj_params_.width, obj_params_.length));
// Pinhole depth estimate: fx * real_width / pixel_width.
double distance;
distance = cam_params_.K.at<float>(0, 0) * expected_average_width /
width_height[0];
// Compensate for object center point and not closest face
distance += obj_params_.length / 2;
// Back-project the bb center to a 3D point at that depth (camera frame).
Eigen::Vector3d pos = cam_params_.K_inv * distance * bb_center;
Eigen::Quaterniond quat;
// Use the measured bb angle as a yaw guess about the camera Z axis.
quat = Eigen::Quaterniond(Eigen::AngleAxisd(bb[4], Eigen::Vector3d::UnitZ()));
pos = cam_params_.tf_ego_cam * pos;
StateVec s;
// State layout: position (3), velocity (3), angular velocity (3), quaternion (4).
s << pos, 0, 0, 0, 0, 0, 0, quat.coeffs();
return s;
}
// Used if bounding box is aligned
// Approximates a full UKF state (6DoF pose) from a 2D bounding box measurement. Relies on heuristics and assumptions.
template <bool is_aligned_bb = aligned_bb>
StateVec approxStateFromBbHeuristic(
typename std::enable_if<is_aligned_bb, const MeasureVec &>::type bb)
{
std::vector<double> width_height;
// Vector containing bb width and height found from known object size and bb dimensions observed.
// Order (larger, smaller) to match the object's own width/height ordering.
// NOTE(review): only width_height[0] is used below.
width_height = (obj_params_.width > obj_params_.height) ? ((bb(2) > bb(3)) ? std::vector<double>{bb(2), bb(3)} : std::vector<double>{bb(3), bb(2)}) : ((bb(2) > bb(3)) ? std::vector<double>{bb(3), bb(2)} : std::vector<double>{bb(2), bb(3)});
double d;
Eigen::Vector3d bb_center;
bb_center << bb.segment(0, 2), 1;
// Get a guess approximate size of object visible in the image. Assume that
// the object is at the same height as camera. Average between size of the
// smallest object side and the object oriented 45 deg.
// NOTE(review): COS_45 is defined as 0.52532198881 == cos(45 radians);
// cos(45 degrees) would be ~0.7071 — confirm intended value.
float expected_average_width = 0.5 *
(COS_45 * (obj_params_.width + obj_params_.length) +
std::min(obj_params_.width, obj_params_.length));
// Pinhole depth estimate: fx * real_width / pixel_width.
d = cam_params_.K.at<float>(0, 0) * expected_average_width /
width_height[0];
// Compensate for object center point and not closest face
d += obj_params_.length / 2;
// Back-project the bb center to a 3D point at that depth (camera frame),
// then move it into the ego frame.
Eigen::Vector3d pos = cam_params_.K_inv * d * bb_center;
pos = cam_params_.tf_ego_cam * pos;
StateVec s;
// Zero velocities; identity attitude (no angle information in an aligned bb).
s << pos, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
s.quat(0).setIdentity();
return s;
}
// Used if bounding box is aligned
// Used if bounding box is aligned.
// Error between two bounding boxes: squared center offsets (down-weighted by
// 0.2) plus squared differences of the square roots of width and height (the
// sqrt tempers the influence of large boxes).
template <bool is_aligned_bb = aligned_bb>
double compareTwoBoundingBoxes(
typename std::enable_if<is_aligned_bb, const MeasureVec &>::type bb1,
typename std::enable_if<is_aligned_bb, const MeasureVec &>::type bb2)
{
const double center_term =
0.2 * pow(bb1[0] - bb2[0], 2) + 0.2 * pow(bb1[1] - bb2[1], 2);
const double size_term =
pow(sqrt(bb1[2]) - sqrt(bb2[2]), 2) + pow(sqrt(bb1[3]) - sqrt(bb2[3]), 2);
return center_term + size_term;
}
// Used if bounding box is angled
// Used if bounding box is angled.
// Same score as the aligned overload; the angle component (index 4) is
// currently ignored.
// TODO Include angle difference. Maybe use distances between each corner for angled boxes?
template <bool is_aligned_bb = aligned_bb>
double compareTwoBoundingBoxes(
typename std::enable_if<!is_aligned_bb, const MeasureVec &>::type bb1,
typename std::enable_if<!is_aligned_bb, const MeasureVec &>::type bb2)
{
const double center_term =
0.2 * pow(bb1[0] - bb2[0], 2) + 0.2 * pow(bb1[1] - bb2[1], 2);
const double size_term =
pow(sqrt(bb1[2]) - sqrt(bb2[2]), 2) + pow(sqrt(bb1[3]) - sqrt(bb2[3]), 2);
return center_term + size_term;
}
/* measurement model definition */
/* measurement model definition */
// Project a standalone 7-DoF pose through the measurement model by embedding
// it in a zero-velocity state and evaluating H().
MeasureVec boundingBoxFromPose(const PoseVec &pose)
{
return H(poseToState(pose));
}
// Extract the 7-vector pose (position xyz, quaternion coeffs) from the full
// state, dropping the velocity terms.
PoseVec stateToPose(const StateVec &state_vec)
{
PoseVec pose_out;
pose_out.segment<3>(0) = state_vec.head<3>();
pose_out.segment<4>(3) = state_vec.tail<4>();
return pose_out;
}
// Embed a 7-vector pose into a full state with zero linear and angular
// velocities.
StateVec poseToState(const PoseVec &pose_vec)
{
StateVec state = StateVec::Zero();
state.head<3>() = pose_vec.segment<3>(0);
state.tail<4>() = pose_vec.segment<4>(3);
return state;
}
std::tuple<double, StateVec> approxStatePoseOptimFromInit(const MeasureVec &target_bb, const StateVec &init_state)
{
PoseVec curr_pose, best_pose;
curr_pose = stateToPose(init_state);
best_pose = curr_pose;
GradVec latest_update, grad;
latest_update = GradVec::Zero();
grad = GradVec::Zero();
GradVec lr;
lr = obj_params_.obj_pose_init_params.lr;
// To keep track of errors and convergence
double lowest_err, latest_err, curr_err;
curr_err = compareTwoBoundingBoxes(target_bb, boundingBoxFromPose(curr_pose));
lowest_err = curr_err;
latest_err = curr_err;
int nb_same_err = 0;
int iter = 0;
Eigen::Matrix<double, 7, 7> poses_steps_r, poses_steps_l, steps;
MeasureVec bb_r, bb_l;
while (nb_same_err < obj_params_.obj_pose_init_params.conv_steps && iter < obj_params_.obj_pose_init_params.max_steps)
{
grad.setZero();
// Handle translation
PoseVec l, r;
// TODO Handle as matrix operation
for (int i = 0; i < 3; i++)
{
l = curr_pose;
r = curr_pose;
l[i] -= obj_params_.obj_pose_init_params.grad_comp_step_sizes[i];
r[i] += obj_params_.obj_pose_init_params.grad_comp_step_sizes[i];
bb_l = boundingBoxFromPose(l);
bb_r = boundingBoxFromPose(r);
grad[i] = (compareTwoBoundingBoxes(target_bb, bb_r) - compareTwoBoundingBoxes(target_bb, bb_l)) / (2 * obj_params_.obj_pose_init_params.grad_comp_step_sizes[i]);
}
// Handle rotation and step along the quaternion with respect to the Euler axis
Eigen::Quaterniond quat_l, quat_r, curr_quat;
curr_quat = Eigen::Quaterniond(curr_pose[6], curr_pose[3], curr_pose[4], curr_pose[5]);
for (int i = 3; i < 6; i++)
{
l = curr_pose;
r = curr_pose;
Eigen::Vector3d direction;
switch (i)
{
case 3:
direction = Eigen::Vector3d::UnitX();
break;
case 4:
direction = Eigen::Vector3d::UnitY();
break;
case 5:
direction = Eigen::Vector3d::UnitZ();
break;
}
quat_l = Eigen::Quaterniond(Eigen::AngleAxisd(-obj_params_.obj_pose_init_params.grad_comp_step_sizes[i], direction));
quat_r = Eigen::Quaterniond(Eigen::AngleAxisd(obj_params_.obj_pose_init_params.grad_comp_step_sizes[i], direction));
l.tail<4>() = (quat_l * curr_quat).coeffs();
r.tail<4>() = (quat_r * curr_quat).coeffs();
bb_l = boundingBoxFromPose(l);
bb_r = boundingBoxFromPose(r);
grad[i] = (compareTwoBoundingBoxes(target_bb, bb_r) - compareTwoBoundingBoxes(target_bb, bb_l)) / (2 * obj_params_.obj_pose_init_params.grad_comp_step_sizes[i]);
}
latest_update = obj_params_.obj_pose_init_params.momentum * latest_update + (1 - obj_params_.obj_pose_init_params.momentum) * grad;
curr_pose.head<3>() -= lr.head<3>().cwiseProduct(latest_update.head<3>());
Eigen::Quaterniond rot_applied;
rot_applied = Eigen::Quaterniond(Eigen::AngleAxisd(lr[3] * grad[3], Eigen::Vector3d::UnitX()) * Eigen::AngleAxisd(lr[4] * grad[4], Eigen::Vector3d::UnitY()) * Eigen::AngleAxisd(lr[5] * grad[5], Eigen::Vector3d::UnitZ()));
curr_pose.tail<4>() = (rot_applied * curr_quat).coeffs();
curr_err = compareTwoBoundingBoxes(target_bb, boundingBoxFromPose(curr_pose));
if (curr_err < lowest_err)
{
lowest_err = curr_err;
best_pose = curr_pose;
}
if (curr_err == latest_err)
{
nb_same_err++;
}
else
{
nb_same_err = 0;
}
if (iter % obj_params_.obj_pose_init_params.period_lower_lr == 0)
{
lr /= 2;
}
latest_err = curr_err;
iter++;
}
StateVec out_state;
out_state = poseToState(best_pose);
return std::make_tuple(lowest_err, out_state);
}
std::tuple<double, StateVec> approxStatePoseOptim(const MeasureVec &target_bb)
{
std::vector<std::tuple<double, StateVec>> outputs;
std::tuple<double, StateVec> output;
#pragma omp parallel for
for (int i = 0; i < obj_params_.obj_pose_init_params.init_pose_guesses.size(); i++)
{
output = approxStatePoseOptimFromInit(target_bb, poseToState(obj_params_.obj_pose_init_params.init_pose_guesses[i]));
outputs.push_back(output);
}
// Sort results by error, which is the first value of the returned tuple, stored in the vector.
std::sort(outputs.begin(), outputs.end(),
[](std::tuple<double, StateVec> const &lhs, std::tuple<double, StateVec> const &rhs)
{ return std::get<0>(lhs) < std::get<0>(rhs); });
return outputs[0];
}
std::tuple<double, StateVec> approxStatePoseOptimSingleThread(const MeasureVec &target_bb)
{
std::vector<std::tuple<double, StateVec>> outputs;
std::tuple<double, StateVec> output;
for (PoseVec const &init_pose_guess : obj_params_.obj_pose_init_params.init_pose_guesses)
{
output = approxStatePoseOptimFromInit(target_bb, poseToState(init_pose_guess));
outputs.push_back(output);
}
// Sort results by error, which is the first value of the returned tuple, stored in the vector.
std::sort(outputs.begin(), outputs.end(),
[](std::tuple<double, StateVec> const &lhs, std::tuple<double, StateVec> const &rhs)
{ return std::get<0>(lhs) < std::get<0>(rhs); });
return outputs[0];
}
// Converts the state with pose and velocities to a 4x4 transformation matrix relating the pose to the origin.
static void stateToTf(const StateVec &state, Eigen::Affine3f &tf)
{
tf = Eigen::Affine3f::Identity() *
Eigen::Translation3f(Eigen::Vector3f(state(0), state(1), state(2))) *
Eigen::Quaternionf(state.quat(0).w(),
state.quat(0).x(),
state.quat(0).y(),
state.quat(0).z());
}
// Used if bounding box is angled.
// Converts an OpenCV rotated rectangle to a measurement vector for the UKF:
// (center x, center y, width, height, angle in radians).
template <bool is_aligned_bb = aligned_bb>
MeasureVec convertRotRectToMeasureVec(
    typename std::enable_if<!is_aligned_bb, const cv::RotatedRect &>::type
        rect)
{
  const float ar_meas = rect.size.width / rect.size.height;
  // Transform angle to wanted convention: shift by 90 degrees when the
  // measured aspect ratio sits on the opposite side of 1 from the model's.
  const float angle = ((ar_meas > 1 && obj_params_.aspect_ratio < 1) ||
                       (ar_meas < 1 && obj_params_.aspect_ratio > 1))
                          ? rect.angle + 90
                          : rect.angle;
  MeasureVec out;
  out << rect.center.x, rect.center.y, rect.size.width, rect.size.height,
      angle * M_PI / 180;
  return out;
}
// Used if bounding box is aligned.
// Converts an OpenCV rotated rectangle to a measurement vector for the UKF:
// the axis-aligned box enclosing the rotated rect, as (center x, center y,
// width, height).
template <bool is_aligned_bb = aligned_bb>
static MeasureVec convertRotRectToMeasureVec(
    typename std::enable_if<is_aligned_bb, const cv::RotatedRect &>::type
        rect)
{
  const cv::Rect2f aabb = rect.boundingRect2f();
  MeasureVec out;
  // boundingRect2f() reports the top-left corner; convert to the center.
  out << aabb.x + aabb.width / 2, aabb.y + aabb.height / 2, aabb.width,
      aabb.height;
  return out;
}
// Measurement model definition, based on projecting the pose into the image using camera parameters.
// Maps a UKF state (relative pose of the tracked object) to the expected 2D
// bounding-box measurement: the object's model points are transformed into the
// camera frame, projected with the camera intrinsics and distortion, and the
// minimum-area rectangle around the projections is converted to a MeasureVec.
// The input `u` is unused here (the signature matches the UKF interface).
MeasureVec H(const StateVec &state, const InputType &u = Eigen::Affine3d::Identity())
{
  // Ego-to-object transform from the state.
  Eigen::Affine3f tf_ego_ado;
  stateToTf(state, tf_ego_ado);
  // Model points (homogeneous, columns) into the camera frame; keep xyz rows.
  Eigen::MatrixXf model_points_cam;
  model_points_cam = ((cam_params_.tf_cam_ego * tf_ego_ado).matrix() *
                      obj_params_.model_points_ado_aug_t)
                         .template topRows<3>();
  // cv::projectPoints expects one point per row.
  model_points_cam.transposeInPlace();
  std::vector<cv::Point2f> projected_points;
  cv::Mat points_cv;
  cv::eigen2cv(model_points_cam, points_cv);
  // Zero rvec/tvec: points are already expressed in the camera frame.
  cv::projectPoints(points_cv,
                    cv::Mat::zeros(3, 1, CV_32F),
                    cv::Mat::zeros(3, 1, CV_32F),
                    cam_params_.K,
                    cam_params_.dist_coeffs,
                    projected_points);
  // Tightest (possibly rotated) rectangle around the projected silhouette.
  MeasureVec out =
      convertRotRectToMeasureVec(cv::minAreaRect(projected_points));
  return out;
}
/* process model definition */
/* Applied before the propagation using dF (defined above) and the
 * integrators. Updates that rely on integrations of the state should be
 * defined on the dF function. Here is good for changing the frame before
 * integration. Re-expresses the whole state in the new ego frame given the
 * ego motion since the previous step. */
StateVec G(const StateVec &state, const InputType &tf_ego_egoprev)
{
  const Eigen::Matrix3d rot = tf_ego_egoprev.linear();
  StateVec out;
  // Position: full rigid transform (rotation + translation).
  out.head<3>() = tf_ego_egoprev * state.head<3>();
  // Linear and angular velocities: rotation only (free vectors).
  out.segment<3>(3) = rot * state.segment<3>(3);
  out.segment<3>(6) = rot * state.segment<3>(6);
  // Orientation quaternion rotated into the new frame.
  out.quat(0) = Eigen::Quaterniond(rot * state.quat(0));
  return out;
}
// Replaces the complete camera parameter set (intrinsics and extrinsics).
void updateCamera(msl_raptor_backend::CameraParams cam_params)
{
  cam_params_ = cam_params;
}
// Updates camera intrinsics from principal point (ppx, ppy), focal lengths
// (fx, fy) and a distortion-coefficient matrix.
void updateCamIntrinsics(const float ppx,
                         const float ppy,
                         const float fx,
                         const float fy,
                         const cv::Mat &dist_coeffs_in)
{
  cam_params_.updateIntrinsics(ppx, ppy, fx, fy, dist_coeffs_in);
}
// Updates camera intrinsics from a flattened camera matrix and distortion
// coefficients given as plain vectors.
void updateCamIntrinsics(const std::vector<float> &camera_matrix_in,
                         const std::vector<float> &dist_coeffs_in)
{
  cam_params_.updateIntrinsics(camera_matrix_in, dist_coeffs_in);
}
// Updates camera intrinsics from OpenCV matrices.
void updateCamIntrinsics(const cv::Mat &camera_matrix_in,
                         const cv::Mat &dist_coeffs_in)
{
  cam_params_.updateIntrinsics(camera_matrix_in, dist_coeffs_in);
}
// Updates camera extrinsics from a full rigid transform.
void updateCamExtrinsics(const Eigen::Affine3d &extrinsics_mat)
{
  cam_params_.updateExtrinsics(extrinsics_mat);
}
// Updates camera extrinsics from OpenCV rotation/translation vectors.
void updateCamExtrinsics(const cv::Mat &rvec_in, const cv::Mat &tvec_in)
{
  cam_params_.updateExtrinsics(rvec_in, tvec_in);
}
// Updates camera extrinsics from rotation/translation given as plain vectors.
void updateCamExtrinsics(const std::vector<float> &rvec_in,
                         const std::vector<float> &tvec_in)
{
  cam_params_.updateExtrinsics(rvec_in, tvec_in);
}
// Update the UKF
// Runs one filter cycle: propagates the state over delta_t (applying `input`
// as the ego-motion transform, see G()) and corrects with the bounding-box
// measurement z. Requires init() to have been called first.
void update(double delta_t,
            const MeasureVec &z,
            const InputType &input = Eigen::Affine3d::Identity())
{
  ukf_->update(delta_t, z, input);
}
// Current state estimate held by the underlying UKF.
StateVec getState()
{
  return ukf_->state;
}
// Square-root covariance factor as stored by the underlying UKF.
Eigen::MatrixXd getStateRootCov()
{
  return ukf_->stateRootCov;
}
// Initialise the MSL-RAPTOR UKF based on object, camera parameters, and an initial UKF state.
// FIX: safe to call repeatedly — a UKF allocated by a previous init() is now
// released before the new one is created (the original leaked it).
void init(msl_raptor_backend::ObjParams obj_params,
          msl_raptor_backend::CameraParams cam_params,
          StateVec init_state)
{
  obj_params_ = obj_params;
  cam_params_ = cam_params;
  delete ukf_;  // no-op when ukf_ is still nullptr
  ukf_ = new kalman::UKF<MSLRaptorUKF, InputType>(*this);
  ukf_->state = init_state;
}
// Default constructor: leaves the filter unallocated (ukf_ == nullptr);
// init() must be called before update()/getState() are used.
MSLRaptorUKF(void) {}
// Initialise with a zero state and identity orientation.
MSLRaptorUKF(msl_raptor_backend::ObjParams obj_params,
             msl_raptor_backend::CameraParams cam_params)
{
  StateVec init_state = StateVec::Zero();
  init_state.quat(0).setIdentity();
  init(obj_params, cam_params, init_state);
}
// Initialise the MSL-RAPTOR UKF based on object, camera parameters, and an initial UKF state.
MSLRaptorUKF(msl_raptor_backend::ObjParams obj_params,
             msl_raptor_backend::CameraParams cam_params,
             StateVec init_state)
{
  init(obj_params, cam_params, init_state);
}
// Initialise the MSL-RAPTOR UKF based on object, camera parameters, and a 2D bb from which to approximate a UKF state.
MSLRaptorUKF(msl_raptor_backend::ObjParams obj_params,
             msl_raptor_backend::CameraParams cam_params,
             MeasureVec bb_for_approx_init)
{
  // The params must be set before init(): approxStateFromBbHeuristic reads them.
  obj_params_ = obj_params;
  cam_params_ = cam_params;
  StateVec init_state = approxStateFromBbHeuristic(bb_for_approx_init);
  init(obj_params, cam_params, init_state);
}
private:
msl_raptor_backend::ObjParams obj_params_;
msl_raptor_backend::CameraParams cam_params_;
// Owning pointer, allocated in init(); initialised to nullptr so the delete
// in init() is safe on first use (the original left it uninitialized after
// default construction).
// NOTE(review): never freed on destruction, and the implicit copy operations
// would alias it — consider std::unique_ptr plus deleted copies.
kalman::UKF<MSLRaptorUKF, InputType> *ukf_ = nullptr;
};
// ------------------------------------------------------------------------------------
// ----------------------- END MSL-RAPTOR Back-end UKF definition -------------------
// ------------------------------------------------------------------------------------ |
NeoHookeanMaterial.c | /* This file is part of redbKIT.
* Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne (EPFL)
* Author: Federico Negri <federico.negri@epfl.ch>
*/
#include "NeoHookeanMaterial.h"
/*************************************************************************/
/* Assembles the residual (internal force) contributions of a compressible
 * Neo-Hookean material, element by element, returning the row indices and
 * values of the local residual entries.
 * FIX: removed unused locals (l, nln2, dP, iii, b, j_c, P1) and their stale
 * mentions in the OpenMP clauses; all computation is unchanged. */
void NeoHookeanMaterial_forces(mxArray* plhs[], const mxArray* prhs[])
{
    /* prhs layout as used below:
       [0] dim, [2] material params [Young, Poisson], [3] solution U_h,
       [4] elements (connectivity, 1-based), [5] nln, [6] quad weights,
       [7] inverse Jacobians, [8] Jacobian determinants, [9] basis functions,
       [10] reference-basis gradients. */
    double* dim_ptr = mxGetPr(prhs[0]);
    int dim = (int)(dim_ptr[0]);
    int noe = mxGetN(prhs[4]);
    double* nln_ptr = mxGetPr(prhs[5]);
    int nln = (int)(nln_ptr[0]);
    int numRowsElements = mxGetM(prhs[4]);

    /* Outputs: (row index, value) pairs of local residual entries */
    plhs[0] = mxCreateDoubleMatrix(nln*noe*dim,1, mxREAL);
    plhs[1] = mxCreateDoubleMatrix(nln*noe*dim,1, mxREAL);
    double* myRrows = mxGetPr(plhs[0]);
    double* myRcoef = mxGetPr(plhs[1]);

    int k;
    int q;
    int NumQuadPoints = mxGetN(prhs[6]);
    int NumNodes = (int)(mxGetM(prhs[3]) / dim);

    double* U_h = mxGetPr(prhs[3]);
    double* w = mxGetPr(prhs[6]);
    double* invjac = mxGetPr(prhs[7]);
    double* detjac = mxGetPr(prhs[8]);
    double* phi = mxGetPr(prhs[9]);
    double* gradrefphi = mxGetPr(prhs[10]);
    double* elements = mxGetPr(prhs[4]);

    /* Identity matrix */
    double Id[dim][dim];
    int d1,d2;
    for (d1 = 0; d1 < dim; d1 = d1 + 1 )
    {
        for (d2 = 0; d2 < dim; d2 = d2 + 1 )
        {
            Id[d1][d2] = 0;
            if (d1==d2)
            {
                Id[d1][d2] = 1;
            }
        }
    }

    /* Material parameters: Lame-type coefficients from (Young, Poisson) */
    double* material_param = mxGetPr(prhs[2]);
    double Young = material_param[0];
    double Poisson = material_param[1];
    double mu = Young / (2.0 + 2.0 * Poisson);
    double lambda = Young * Poisson /( (1.0 + Poisson) * (1.0-2.0*Poisson) );
    double bulk = ( 2.0 / 3.0 ) * mu + lambda;

    /* Assembly: loop over the elements */
    int ie;
    #pragma omp parallel for shared(invjac,detjac,elements,myRrows,myRcoef,U_h) private(ie,k,q,d1,d2) firstprivate(phi,gradrefphi,w,NumQuadPoints,numRowsElements,nln,NumNodes,Id,mu,bulk)
    for (ie = 0; ie < noe; ie = ie + 1 )
    {
        /* Per-quadrature-point kinematic quantities */
        double I_C[NumQuadPoints];
        double detF[NumQuadPoints];
        double logdetF[NumQuadPoints];
        double pow23detF[NumQuadPoints];
        double pow2detF[NumQuadPoints];
        double F[NumQuadPoints][dim][dim];     /* deformation gradient */
        double invFT[NumQuadPoints][dim][dim]; /* inverse transpose of F */
        double C[NumQuadPoints][dim][dim];     /* right Cauchy-Green tensor */
        double P_Uh[dim][dim];                 /* first Piola-Kirchhoff stress */
        double GradV[dim][dim];
        double GradUh[NumQuadPoints][dim][dim];
        double gradphi[dim][nln][NumQuadPoints];

        for (q = 0; q < NumQuadPoints; q = q + 1 )
        {
            /* Compute Gradient of Basis functions*/
            for (k = 0; k < nln; k = k + 1 )
            {
                for (d1 = 0; d1 < dim; d1 = d1 + 1 )
                {
                    gradphi[d1][k][q] = 0;
                    for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                    {
                        gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
                    }
                }
            }
            /* Displacement gradient and F = I + Grad(u) */
            for (d1 = 0; d1 < dim; d1 = d1 + 1 )
            {
                for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                {
                    GradUh[q][d1][d2] = 0;
                    for (k = 0; k < nln; k = k + 1 )
                    {
                        int e_k;
                        /* -1: MATLAB 1-based node ids to C 0-based indexing */
                        e_k = (int)(elements[ie*numRowsElements + k] + d1*NumNodes - 1);
                        GradUh[q][d1][d2] = GradUh[q][d1][d2] + U_h[e_k] * gradphi[d2][k][q];
                    }
                    F[q][d1][d2] = Id[d1][d2] + GradUh[q][d1][d2];
                }
            }
            detF[q] = MatrixDeterminant(dim, F[q]);
            MatrixInvT(dim, F[q], invFT[q] );
            MatrixProductAlphaT1(dim, 1.0, F[q], F[q], C[q] );
            logdetF[q] = log( detF[q] );
            pow23detF[q] = pow(detF[q], -2.0 / 3.0);
            pow2detF[q] = pow(detF[q], 2.0);
            I_C[q] = Trace(dim, C[q]);
        }

        int ii = 0;
        int a, i_c;
        /* loop over test functions --> a */
        for (a = 0; a < nln; a = a + 1 )
        {
            /* loop over test components --> i_c */
            for (i_c = 0; i_c < dim; i_c = i_c + 1 )
            {
                /* set gradV to zero*/
                for (d1 = 0; d1 < dim; d1 = d1 + 1 )
                {
                    for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                    {
                        GradV[d1][d2] = 0;
                    }
                }
                double rloc = 0;
                for (q = 0; q < NumQuadPoints; q = q + 1 )
                {
                    /* only row i_c of GradV is nonzero for this test function */
                    for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                    {
                        GradV[i_c][d2] = gradphi[d2][a][q];
                    }
                    /* First Piola-Kirchhoff stress: isochoric + volumetric parts */
                    for (d1 = 0; d1 < dim; d1 = d1 + 1 )
                    {
                        for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                        {
                            P_Uh[d1][d2] = mu * pow23detF[q] * ( F[q][d1][d2] - 1.0 / 3.0 * I_C[q] * invFT[q][d1][d2] )
                                + 1.0 / 2.0 * bulk * ( pow2detF[q] - detF[q] + logdetF[q] ) * invFT[q][d1][d2];
                        }
                    }
                    rloc = rloc + Mdot( dim, GradV, P_Uh) * w[q];
                }
                myRrows[ie*nln*dim+ii] = elements[a+ie*numRowsElements] + i_c * NumNodes;
                myRcoef[ie*nln*dim+ii] = rloc*detjac[ie];
                ii = ii + 1;
            }
        }
    }
}
/*************************************************************************/
/*************************************************************************/
/* Assembles the tangent stiffness (Jacobian) contributions of a compressible
 * Neo-Hookean material, element by element, in COO triplet form
 * (row indices, column indices, values).
 * FIX: removed unused locals (l, ii, P_Uh, dP_vol1, dP_iso) and the stale
 * mention of l in the OpenMP private clause; all computation is unchanged. */
void NeoHookeanMaterial_jacobian(mxArray* plhs[], const mxArray* prhs[])
{
    /* prhs layout as used below:
       [0] dim, [2] material params [Young, Poisson], [3] solution U_h,
       [4] elements (connectivity, 1-based), [5] nln, [6] quad weights,
       [7] inverse Jacobians, [8] Jacobian determinants, [9] basis functions,
       [10] reference-basis gradients. */
    double* dim_ptr = mxGetPr(prhs[0]);
    int dim = (int)(dim_ptr[0]);
    int noe = mxGetN(prhs[4]);
    double* nln_ptr = mxGetPr(prhs[5]);
    int nln = (int)(nln_ptr[0]);
    int numRowsElements = mxGetM(prhs[4]);
    int nln2 = nln*nln;

    /* Outputs: COO triplets of local tangent entries */
    plhs[0] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
    plhs[1] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
    plhs[2] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
    double* myArows = mxGetPr(plhs[0]);
    double* myAcols = mxGetPr(plhs[1]);
    double* myAcoef = mxGetPr(plhs[2]);

    int k;
    int q;
    int NumQuadPoints = mxGetN(prhs[6]);
    int NumNodes = (int)(mxGetM(prhs[3]) / dim);

    double* U_h = mxGetPr(prhs[3]);
    double* w = mxGetPr(prhs[6]);
    double* invjac = mxGetPr(prhs[7]);
    double* detjac = mxGetPr(prhs[8]);
    double* phi = mxGetPr(prhs[9]);
    double* gradrefphi = mxGetPr(prhs[10]);
    double* elements = mxGetPr(prhs[4]);

    /* Identity matrix */
    double Id[dim][dim];
    int d1,d2;
    for (d1 = 0; d1 < dim; d1 = d1 + 1 )
    {
        for (d2 = 0; d2 < dim; d2 = d2 + 1 )
        {
            Id[d1][d2] = 0;
            if (d1==d2)
            {
                Id[d1][d2] = 1;
            }
        }
    }

    /* Material parameters: Lame-type coefficients from (Young, Poisson) */
    double* material_param = mxGetPr(prhs[2]);
    double Young = material_param[0];
    double Poisson = material_param[1];
    double mu = Young / (2.0 + 2.0 * Poisson);
    double lambda = Young * Poisson /( (1.0 + Poisson) * (1.0-2.0*Poisson) );
    double bulk = ( 2.0 / 3.0 ) * mu + lambda;

    /* Assembly: loop over the elements */
    int ie;
    #pragma omp parallel for shared(invjac,detjac,elements,myAcols,myArows,myAcoef,U_h) private(ie,k,q,d1,d2) firstprivate(phi,gradrefphi,w,NumQuadPoints,numRowsElements,nln2,nln,NumNodes,Id,mu,bulk)
    for (ie = 0; ie < noe; ie = ie + 1 )
    {
        /* Per-quadrature-point kinematic quantities */
        double I_C[NumQuadPoints];
        double detF[NumQuadPoints];
        double logdetF[NumQuadPoints];
        double pow23detF[NumQuadPoints];
        double pow2detF[NumQuadPoints];
        double F[NumQuadPoints][dim][dim];     /* deformation gradient */
        double invFT[NumQuadPoints][dim][dim]; /* inverse transpose of F */
        double C[NumQuadPoints][dim][dim];     /* right Cauchy-Green tensor */
        double dP[dim][dim];                   /* directional derivative of P */
        double GradV[dim][dim];
        double GradU[dim][dim];
        double GradUh[NumQuadPoints][dim][dim];
        double gradphi[dim][nln][NumQuadPoints];

        for (q = 0; q < NumQuadPoints; q = q + 1 )
        {
            /* Compute Gradient of Basis functions*/
            for (k = 0; k < nln; k = k + 1 )
            {
                for (d1 = 0; d1 < dim; d1 = d1 + 1 )
                {
                    gradphi[d1][k][q] = 0;
                    for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                    {
                        gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
                    }
                }
            }
            /* Displacement gradient and F = I + Grad(u) */
            for (d1 = 0; d1 < dim; d1 = d1 + 1 )
            {
                for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                {
                    GradUh[q][d1][d2] = 0;
                    for (k = 0; k < nln; k = k + 1 )
                    {
                        int e_k;
                        /* -1: MATLAB 1-based node ids to C 0-based indexing */
                        e_k = (int)(elements[ie*numRowsElements + k] + d1*NumNodes - 1);
                        GradUh[q][d1][d2] = GradUh[q][d1][d2] + U_h[e_k] * gradphi[d2][k][q];
                    }
                    F[q][d1][d2] = Id[d1][d2] + GradUh[q][d1][d2];
                }
            }
            detF[q] = MatrixDeterminant(dim, F[q]);
            MatrixInvT(dim, F[q], invFT[q] );
            MatrixProductAlphaT1(dim, 1.0, F[q], F[q], C[q] );
            logdetF[q] = log( detF[q] );
            pow23detF[q] = pow(detF[q], -2.0 / 3.0);
            pow2detF[q] = pow(detF[q], 2.0);
            I_C[q] = Trace(dim, C[q]);
        }

        int iii = 0;
        int a, b, i_c, j_c;
        /* loop over test functions --> a */
        for (a = 0; a < nln; a = a + 1 )
        {
            /* loop over test components --> i_c */
            for (i_c = 0; i_c < dim; i_c = i_c + 1 )
            {
                /* set gradV to zero*/
                for (d1 = 0; d1 < dim; d1 = d1 + 1 )
                {
                    for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                    {
                        GradV[d1][d2] = 0;
                    }
                }
                /* loop over trial functions --> b */
                for (b = 0; b < nln; b = b + 1 )
                {
                    /* loop over trial components --> j_c */
                    for (j_c = 0; j_c < dim; j_c = j_c + 1 )
                    {
                        /* set gradU to zero*/
                        for (d1 = 0; d1 < dim; d1 = d1 + 1 )
                        {
                            for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                            {
                                GradU[d1][d2] = 0;
                            }
                        }
                        double aloc = 0;
                        for (q = 0; q < NumQuadPoints; q = q + 1 )
                        {
                            /* only rows i_c / j_c are nonzero for these shape functions */
                            for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                            {
                                GradV[i_c][d2] = gradphi[d2][a][q];
                                GradU[j_c][d2] = gradphi[d2][b][q];
                            }
                            /* volumetric part */
                            double dP_vol[dim][dim];
                            double dP_vol2_tmp[dim][dim];
                            double dP_vol2[dim][dim];
                            MatrixScalar(dim, 0.5*bulk * (2.0*pow2detF[q] -detF[q] + 1.0)*Mdot(dim, invFT[q], GradU),
                                         invFT[q], dP_vol);
                            MatrixProductAlphaT2(dim, 0.5*bulk * ( - pow2detF[q] + detF[q] - logdetF[q]), invFT[q], GradU, dP_vol2_tmp);
                            MatrixProductAlpha(dim, 1.0, dP_vol2_tmp, invFT[q], dP_vol2);
                            MatrixSum(dim, dP_vol, dP_vol2);
                            /* isochoric part */
                            double dP_iso1[dim][dim];
                            double dP_iso24[dim][dim];
                            double dP_iso3[dim][dim];
                            double dP_iso5[dim][dim];
                            double dP_iso5_tmp[dim][dim];
                            double dP_iso5_tmp2[dim][dim];
                            MatrixScalar(dim, -2.0 / 3.0 * mu * pow23detF[q] * Mdot(dim, invFT[q], GradU),
                                         F[q], dP_iso1);
                            MatrixScalar(dim, mu * pow23detF[q] *
                                         ( 2.0 / 9.0 * I_C[q] * Mdot(dim, invFT[q], GradU)
                                           -2.0 / 3.0 * Mdot(dim, F[q], GradU) ),
                                         invFT[q], dP_iso24);
                            MatrixScalar(dim, mu * pow23detF[q], GradU, dP_iso3);
                            MatrixProductAlphaT2(dim, 1.0, invFT[q], GradU, dP_iso5_tmp);
                            MatrixProductAlpha(dim, 1.0, dP_iso5_tmp, invFT[q], dP_iso5_tmp2);
                            MatrixScalar(dim, 1.0 / 3.0 * mu * pow23detF[q] * I_C[q] , dP_iso5_tmp2, dP_iso5);
                            /* Sum all contributes */
                            for (d1 = 0; d1 < dim; d1 = d1 + 1 )
                            {
                                for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                                {
                                    dP[d1][d2] = dP_vol[d1][d2]
                                        + dP_iso1[d1][d2]
                                        + dP_iso24[d1][d2]
                                        + dP_iso3[d1][d2]
                                        + dP_iso5[d1][d2];
                                }
                            }
                            aloc = aloc + Mdot( dim, GradV, dP) * w[q];
                        }
                        myArows[ie*nln2*dim*dim+iii] = elements[a+ie*numRowsElements] + i_c * NumNodes;
                        myAcols[ie*nln2*dim*dim+iii] = elements[b+ie*numRowsElements] + j_c * NumNodes;
                        myAcoef[ie*nln2*dim*dim+iii] = aloc*detjac[ie];
                        iii = iii + 1;
                    }
                }
            }
        }
    }
}
/*************************************************************************/
void NeoHookeanMaterial_jacobianFast(mxArray* plhs[], const mxArray* prhs[])
{
double* dim_ptr = mxGetPr(prhs[0]);
int dim = (int)(dim_ptr[0]);
int noe = mxGetN(prhs[4]);
double* nln_ptr = mxGetPr(prhs[5]);
int nln = (int)(nln_ptr[0]);
int numRowsElements = mxGetM(prhs[4]);
int nln2 = nln*nln;
plhs[0] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
plhs[1] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
plhs[2] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
double* myArows = mxGetPr(plhs[0]);
double* myAcols = mxGetPr(plhs[1]);
double* myAcoef = mxGetPr(plhs[2]);
int k,l;
int q;
int NumQuadPoints = mxGetN(prhs[6]);
int NumNodes = (int)(mxGetM(prhs[3]) / dim);
double* U_h = mxGetPr(prhs[3]);
double* w = mxGetPr(prhs[6]);
double* invjac = mxGetPr(prhs[7]);
double* detjac = mxGetPr(prhs[8]);
double* phi = mxGetPr(prhs[9]);
double* gradrefphi = mxGetPr(prhs[10]);
double* elements = mxGetPr(prhs[4]);
double Id[dim][dim];
int d1,d2;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
Id[d1][d2] = 0;
if (d1==d2)
{
Id[d1][d2] = 1;
}
}
}
double* material_param = mxGetPr(prhs[2]);
double Young = material_param[0];
double Poisson = material_param[1];
double mu = Young / (2.0 + 2.0 * Poisson);
double lambda = Young * Poisson /( (1.0 + Poisson) * (1.0-2.0*Poisson) );
double bulk = ( 2.0 / 3.0 ) * mu + lambda;
/* Assembly: loop over the elements */
int ie;
#pragma omp parallel for shared(invjac,detjac,elements,myAcols,myArows,myAcoef,U_h) private(ie,k,l,q,d1,d2) firstprivate(phi,gradrefphi,w,NumQuadPoints,numRowsElements,nln2,nln,NumNodes,Id,mu,bulk)
for (ie = 0; ie < noe; ie = ie + 1 )
{
double I_C[NumQuadPoints];
double detF[NumQuadPoints];
double logdetF[NumQuadPoints];
double pow23detF[NumQuadPoints];
double pow2detF[NumQuadPoints];
double F[NumQuadPoints][dim][dim];
double invFT[NumQuadPoints][dim][dim];
double C[NumQuadPoints][dim][dim];
double GradUh[NumQuadPoints][dim][dim];
double gradphi[NumQuadPoints][dim][nln];
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
/* Compute Gradient of Basis functions*/
for (k = 0; k < nln; k = k + 1 )
{
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
gradphi[q][d1][k] = 0;
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
gradphi[q][d1][k] = gradphi[q][d1][k] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
}
}
}
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
GradUh[q][d1][d2] = 0;
for (k = 0; k < nln; k = k + 1 )
{
int e_k;
e_k = (int)(elements[ie*numRowsElements + k] + d1*NumNodes - 1);
GradUh[q][d1][d2] = GradUh[q][d1][d2] + U_h[e_k] * gradphi[q][d2][k];
}
F[q][d1][d2] = Id[d1][d2] + GradUh[q][d1][d2];
}
}
detF[q] = MatrixDeterminant3(dim, F[q]);
MatrixInvT3(dim, F[q], invFT[q] );
MatrixProductAlphaT1(dim, 1.0, F[q], F[q], C[q] );
logdetF[q] = log( detF[q] );
pow23detF[q] = pow(detF[q], -2.0 / 3.0);
pow2detF[q] = pow(detF[q], 2.0);
I_C[q] = Trace(dim, C[q]);
}
int iii = 0;
int a, b, i_c, j_c;
double aloc[nln][dim][nln][dim];
/* loop over test functions --> a */
for (a = 0; a < nln; a = a + 1 )
{
/* loop over test components --> i_c */
for (i_c = 0; i_c < 3; i_c = i_c + 1 )
{
/* loop over trial functions --> b */
for (b = 0; b < nln; b = b + 1 )
{
/* loop over trial components --> j_c */
for (j_c = 0; j_c < 3; j_c = j_c + 1 )
{
aloc[a][i_c][b][j_c] = 0.0;
}
}
}
}
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
/* loop over test functions --> a */
for (a = 0; a < nln; a = a + 1 )
{
/* loop over trial functions --> b */
for (b = 0; b < nln; b = b + 1 )
{
aloc[a][0][b][0] += ( gradphi[q][0][a]*(invFT[q][0][0]*((I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][0][0]*((bulk*invFT[q][0][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + mu*pow23detF[q]*gradphi[q][0][b] + (bulk*invFT[q][0][0]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][0][0]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][0]*mu*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/9.0) + gradphi[q][1][a]*(invFT[q][0][1]*((I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][0][1]*((bulk*invFT[q][0][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + mu*pow23detF[q]*gradphi[q][1][b] + (bulk*invFT[q][0][1]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][0][1]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - 
(2*invFT[q][0][1]*mu*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/9.0) + gradphi[q][2][a]*(invFT[q][0][2]*((I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][0][2]*((bulk*invFT[q][0][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + mu*pow23detF[q]*gradphi[q][2][b] + (bulk*invFT[q][0][2]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][0][2]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][2]*mu*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/9.0) ) * w[q];
aloc[a][0][b][1] += ( gradphi[q][0][a]*(invFT[q][1][0]*((I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][1][0]*((bulk*invFT[q][0][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][0][0]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][0][0]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][0]*mu*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/9.0) + gradphi[q][1][a]*(invFT[q][1][1]*((I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][1][1]*((bulk*invFT[q][0][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][0][1]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][0][1]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][1]*mu*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + 
F[q][1][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/9.0) + gradphi[q][2][a]*(invFT[q][1][2]*((I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][1][2]*((bulk*invFT[q][0][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][0][2]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][0][2]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][2]*mu*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/9.0) ) * w[q];
aloc[a][0][b][2] += ( gradphi[q][0][a]*(invFT[q][2][0]*((I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][2][0]*((bulk*invFT[q][0][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][0][0]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][0][0]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][0]*mu*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/9.0) + gradphi[q][1][a]*(invFT[q][2][1]*((I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][2][1]*((bulk*invFT[q][0][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][0][1]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][0][1]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][1]*mu*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + 
F[q][2][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/9.0) + gradphi[q][2][a]*(invFT[q][2][2]*((I_C[q]*invFT[q][0][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][2][2]*((bulk*invFT[q][0][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][0][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][0][2]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][0][2]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][2]*mu*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][0][2]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/9.0) ) * w[q];
aloc[a][1][b][0] += ( gradphi[q][0][a]*(invFT[q][0][0]*((I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][0][0]*((bulk*invFT[q][1][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][1][0]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][1][0]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][0]*mu*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/9.0) + gradphi[q][1][a]*(invFT[q][0][1]*((I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][0][1]*((bulk*invFT[q][1][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][1][1]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][1][1]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][1]*mu*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + 
F[q][0][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/9.0) + gradphi[q][2][a]*(invFT[q][0][2]*((I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][0][2]*((bulk*invFT[q][1][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][1][2]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][1][2]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][2]*mu*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/9.0) ) * w[q];
aloc[a][1][b][1] += ( gradphi[q][0][a]*(invFT[q][1][0]*((I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][1][0]*((bulk*invFT[q][1][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + mu*pow23detF[q]*gradphi[q][0][b] + (bulk*invFT[q][1][0]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][1][0]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][0]*mu*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/9.0) + gradphi[q][1][a]*(invFT[q][1][1]*((I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][1][1]*((bulk*invFT[q][1][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + mu*pow23detF[q]*gradphi[q][1][b] + (bulk*invFT[q][1][1]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][1][1]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - 
(2*invFT[q][1][1]*mu*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/9.0) + gradphi[q][2][a]*(invFT[q][1][2]*((I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][1][2]*((bulk*invFT[q][1][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + mu*pow23detF[q]*gradphi[q][2][b] + (bulk*invFT[q][1][2]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][1][2]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][2]*mu*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/9.0) ) * w[q];
aloc[a][1][b][2] += ( gradphi[q][0][a]*(invFT[q][2][0]*((I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][2][0]*((bulk*invFT[q][1][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][1][0]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][1][0]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][0]*mu*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/9.0) + gradphi[q][1][a]*(invFT[q][2][1]*((I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][2][1]*((bulk*invFT[q][1][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][1][1]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][1][1]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][1]*mu*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + 
F[q][2][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/9.0) + gradphi[q][2][a]*(invFT[q][2][2]*((I_C[q]*invFT[q][1][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][2][2]*((bulk*invFT[q][1][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][1][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][1][2]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][1][2]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][2]*mu*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][1][2]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/9.0) ) * w[q];
aloc[a][2][b][0] += ( gradphi[q][0][a]*(invFT[q][0][0]*((I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][0][0]*((bulk*invFT[q][2][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][2][0]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][2][0]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][0]*mu*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/9.0) + gradphi[q][1][a]*(invFT[q][0][1]*((I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][0][1]*((bulk*invFT[q][2][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][2][1]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][2][1]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][1]*mu*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + 
F[q][0][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/9.0) + gradphi[q][2][a]*(invFT[q][0][2]*((I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][0][2]*((bulk*invFT[q][2][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][2][2]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][2][2]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][2]*mu*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/9.0) ) * w[q];
aloc[a][2][b][1] += ( gradphi[q][0][a]*(invFT[q][1][0]*((I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][1][0]*((bulk*invFT[q][2][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][2][0]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][2][0]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][0]*mu*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/9.0) + gradphi[q][1][a]*(invFT[q][1][1]*((I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][1][1]*((bulk*invFT[q][2][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][2][1]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][2][1]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][1]*mu*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + 
F[q][1][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/9.0) + gradphi[q][2][a]*(invFT[q][1][2]*((I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][1][2]*((bulk*invFT[q][2][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + (bulk*invFT[q][2][2]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][2][2]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][2]*mu*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/9.0) ) * w[q];
aloc[a][2][b][2] += ( gradphi[q][0][a]*(invFT[q][2][0]*((I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][2][0]*((bulk*invFT[q][2][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + mu*pow23detF[q]*gradphi[q][0][b] + (bulk*invFT[q][2][0]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][2][0]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][0]*mu*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/9.0) + gradphi[q][1][a]*(invFT[q][2][1]*((I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][2][1]*((bulk*invFT[q][2][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + mu*pow23detF[q]*gradphi[q][1][b] + (bulk*invFT[q][2][1]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][2][1]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - 
(2*invFT[q][2][1]*mu*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/9.0) + gradphi[q][2][a]*(invFT[q][2][2]*((I_C[q]*invFT[q][2][0]*mu*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*gradphi[q][2][b])/3.0) - invFT[q][2][2]*((bulk*invFT[q][2][0]*gradphi[q][0][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][1]*gradphi[q][1][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0 + (bulk*invFT[q][2][2]*gradphi[q][2][b]*(logdetF[q] - detF[q] + pow2detF[q]))/2.0) + mu*pow23detF[q]*gradphi[q][2][b] + (bulk*invFT[q][2][2]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b])*(2*pow2detF[q] - detF[q] + 1))/2.0 - (2*F[q][2][2]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][2]*mu*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + (2*I_C[q]*invFT[q][2][2]*mu*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/9.0) ) * w[q];
}
}
}
for (a = 0; a < nln; a = a + 1 )
{
/* loop over test components --> i_c */
for (i_c = 0; i_c < 3; i_c = i_c + 1 )
{
/* loop over trial functions --> b */
for (b = 0; b < nln; b = b + 1 )
{
/* loop over trial components --> j_c */
for (j_c = 0; j_c < 3; j_c = j_c + 1 )
{
myArows[ie*nln2*9+iii] = elements[a+ie*numRowsElements] + i_c * NumNodes;
myAcols[ie*nln2*9+iii] = elements[b+ie*numRowsElements] + j_c * NumNodes;
myAcoef[ie*nln2*9+iii] = aloc[a][i_c][b][j_c]*detjac[ie];
iii = iii + 1;
}
}
}
}
}
}
/*************************************************************************/
/*
 * NeoHookeanMaterial_prestress: MEX kernel that, for every element, evaluates
 * the prestressed first Piola-Kirchhoff stress of a (nearly incompressible)
 * Neo-Hookean material and assembles the geometric-stiffness Jacobian (COO
 * triplets) and the internal-force residual.
 *
 * prhs: 0=dim, 2=[Young,Poisson], 3=U_h (displacement dofs), 4=elements,
 *       5=nln, 6=quadrature weights, 7=invjac, 8=detjac, 9=phi,
 *       10=gradrefphi, 11=S_0 (prestress at t=0).
 * plhs: 0-2 = Jacobian rows/cols/values, 3-4 = residual rows/values,
 *       5 = updated prestress S_np1 per element/quad point.
 *
 * NOTE: the component loops are hard-coded to 3 (i_c,j_c < 3), so this
 * routine assumes dim == 3 (consistent with MatrixDeterminant3/MatrixInvT3).
 * Fixes vs. previous revision: removed unused local 'l' (also from the OMP
 * private clause) and a stray double semicolon in the residual accumulation.
 */
void NeoHookeanMaterial_prestress(mxArray* plhs[], const mxArray* prhs[])
{
    double* dim_ptr = mxGetPr(prhs[0]);
    int dim = (int)(dim_ptr[0]);
    int noe = mxGetN(prhs[4]);                /* number of elements */
    double* nln_ptr = mxGetPr(prhs[5]);
    int nln = (int)(nln_ptr[0]);              /* local nodes per element */
    int numRowsElements = mxGetM(prhs[4]);
    int nln2 = nln*nln;

    /* Sparse-matrix triplets (rows/cols/coefficients) and residual entries */
    plhs[0] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
    plhs[1] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
    plhs[2] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
    plhs[3] = mxCreateDoubleMatrix(nln*noe*dim,1, mxREAL);
    plhs[4] = mxCreateDoubleMatrix(nln*noe*dim,1, mxREAL);

    double* myArows = mxGetPr(plhs[0]);
    double* myAcols = mxGetPr(plhs[1]);
    double* myAcoef = mxGetPr(plhs[2]);
    double* myRrows = mxGetPr(plhs[3]);
    double* myRcoef = mxGetPr(plhs[4]);

    int k;
    int q;
    int NumQuadPoints = mxGetN(prhs[6]);
    int NumNodes = (int)(mxGetM(prhs[3]) / dim);

    /* Updated prestress, one dim x dim tensor per element and quad point */
    plhs[5] = mxCreateDoubleMatrix(noe*NumQuadPoints*dim*dim, 1, mxREAL);
    double* S_np1 = mxGetPr(plhs[5]);

    double* U_h = mxGetPr(prhs[3]);
    double* w = mxGetPr(prhs[6]);
    double* invjac = mxGetPr(prhs[7]);
    double* detjac = mxGetPr(prhs[8]);
    double* phi = mxGetPr(prhs[9]);
    double* gradrefphi = mxGetPr(prhs[10]);
    double* S_0 = mxGetPr(prhs[11]);
    double* elements = mxGetPr(prhs[4]);

    /* dim x dim identity, used to build F = I + Grad(U_h) */
    double Id[dim][dim];
    int d1,d2;
    for (d1 = 0; d1 < dim; d1 = d1 + 1 )
    {
        for (d2 = 0; d2 < dim; d2 = d2 + 1 )
        {
            Id[d1][d2] = 0;
            if (d1==d2)
            {
                Id[d1][d2] = 1;
            }
        }
    }

    /* Lame-type parameters from (Young, Poisson) */
    double* material_param = mxGetPr(prhs[2]);
    double Young = material_param[0];
    double Poisson = material_param[1];
    double mu = Young / (2.0 + 2.0 * Poisson);
    double lambda = Young * Poisson /( (1.0 + Poisson) * (1.0-2.0*Poisson) );
    double bulk = ( 2.0 / 3.0 ) * mu + lambda;

    /* Assembly: loop over the elements (embarrassingly parallel) */
    int ie;
    #pragma omp parallel for shared(invjac,detjac,elements,myAcols,myArows,myAcoef,myRrows,myRcoef,S_np1,U_h,S_0) private(ie,k,q,d1,d2) firstprivate(phi,gradrefphi,w,NumQuadPoints,numRowsElements,nln2,nln,NumNodes,Id,mu,bulk)
    for (ie = 0; ie < noe; ie = ie + 1 )
    {
        /* Per-element, per-quad-point kinematic quantities */
        double I_C[NumQuadPoints];
        double detF[NumQuadPoints];
        double logdetF[NumQuadPoints];
        double pow23detF[NumQuadPoints];
        double pow2detF[NumQuadPoints];
        double F[NumQuadPoints][dim][dim];
        double invFT[NumQuadPoints][dim][dim];
        double C[NumQuadPoints][dim][dim];
        double GradUh[NumQuadPoints][dim][dim];
        double gradphi[NumQuadPoints][dim][nln];
        double Stress[NumQuadPoints][dim][dim];

        for (q = 0; q < NumQuadPoints; q = q + 1 )
        {
            /* Physical gradients of basis functions: invjac * grad(ref phi) */
            for (k = 0; k < nln; k = k + 1 )
            {
                for (d1 = 0; d1 < dim; d1 = d1 + 1 )
                {
                    gradphi[q][d1][k] = 0;
                    for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                    {
                        gradphi[q][d1][k] = gradphi[q][d1][k] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
                    }
                }
            }
            /* Displacement gradient and deformation gradient F = I + GradUh */
            for (d1 = 0; d1 < dim; d1 = d1 + 1 )
            {
                for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                {
                    GradUh[q][d1][d2] = 0;
                    for (k = 0; k < nln; k = k + 1 )
                    {
                        int e_k;
                        /* elements[] is 1-based (MATLAB), hence the -1 */
                        e_k = (int)(elements[ie*numRowsElements + k] + d1*NumNodes - 1);
                        GradUh[q][d1][d2] = GradUh[q][d1][d2] + U_h[e_k] * gradphi[q][d2][k];
                    }
                    F[q][d1][d2] = Id[d1][d2] + GradUh[q][d1][d2];
                }
            }
            detF[q] = MatrixDeterminant3(dim, F[q]);
            MatrixInvT3(dim, F[q], invFT[q] );
            MatrixProductAlphaT1(dim, 1.0, F[q], F[q], C[q] );   /* C = F^T F */
            logdetF[q] = log( detF[q] );
            pow23detF[q] = pow(detF[q], -2.0 / 3.0);
            pow2detF[q] = pow(detF[q], 2.0);
            I_C[q] = Trace(dim, C[q]);

            /* First Piola-Kirchhoff stress: isochoric + volumetric parts */
            double P[dim][dim];
            for (d1 = 0; d1 < dim; d1 = d1 + 1 )
            {
                for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                {
                    P[d1][d2] = mu * pow23detF[q] * ( F[q][d1][d2] - 1.0 / 3.0 * I_C[q] * invFT[q][d1][d2] )
                              + 1.0 / 2.0 * bulk * ( pow2detF[q] - detF[q] + logdetF[q] ) * invFT[q][d1][d2];
                }
            }
            /* Pull back to second PK: S = F^{-1} P, then add initial prestress */
            MatrixProductAlphaT1(dim, 1.0, invFT[q], P, Stress[q] );
            for (d1 = 0; d1 < dim; d1 = d1 + 1 )
            {
                for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                {
                    PRESTRESS_NP1(ie,q,d1,d2) = Stress[q][d1][d2] + PRESTRESS_0(ie,q,d1,d2);
                }
            }
        }

        /* Assemble Jacobian (geometric stiffness; block-diagonal in components) */
        int iii = 0;
        int a, b, i_c, j_c;
        double aloc[nln][dim][nln][dim];
        for (a = 0; a < nln; a = a + 1 )
        {
            for (i_c = 0; i_c < 3; i_c = i_c + 1 )
            {
                for (b = 0; b < nln; b = b + 1 )
                {
                    for (j_c = 0; j_c < 3; j_c = j_c + 1 )
                    {
                        aloc[a][i_c][b][j_c] = 0.0;
                    }
                }
            }
        }
        for (q = 0; q < NumQuadPoints; q = q + 1 )
        {
            for (a = 0; a < nln; a = a + 1 )
            {
                for (b = 0; b < nln; b = b + 1 )
                {
                    /* grad(phi_a) . S . grad(phi_b), identical on each diagonal block */
                    aloc[a][0][b][0] += ( gradphi[q][0][a]*(PRESTRESS_NP1(ie,q,0,0)*gradphi[q][0][b] + PRESTRESS_NP1(ie,q,1,0)*gradphi[q][1][b] + PRESTRESS_NP1(ie,q,2,0)*gradphi[q][2][b]) + gradphi[q][1][a]*(PRESTRESS_NP1(ie,q,0,1)*gradphi[q][0][b] + PRESTRESS_NP1(ie,q,1,1)*gradphi[q][1][b] + PRESTRESS_NP1(ie,q,2,1)*gradphi[q][2][b]) + gradphi[q][2][a]*(PRESTRESS_NP1(ie,q,0,2)*gradphi[q][0][b] + PRESTRESS_NP1(ie,q,1,2)*gradphi[q][1][b] + PRESTRESS_NP1(ie,q,2,2)*gradphi[q][2][b]) ) * w[q];
                    aloc[a][1][b][1] += ( gradphi[q][0][a]*(PRESTRESS_NP1(ie,q,0,0)*gradphi[q][0][b] + PRESTRESS_NP1(ie,q,1,0)*gradphi[q][1][b] + PRESTRESS_NP1(ie,q,2,0)*gradphi[q][2][b]) + gradphi[q][1][a]*(PRESTRESS_NP1(ie,q,0,1)*gradphi[q][0][b] + PRESTRESS_NP1(ie,q,1,1)*gradphi[q][1][b] + PRESTRESS_NP1(ie,q,2,1)*gradphi[q][2][b]) + gradphi[q][2][a]*(PRESTRESS_NP1(ie,q,0,2)*gradphi[q][0][b] + PRESTRESS_NP1(ie,q,1,2)*gradphi[q][1][b] + PRESTRESS_NP1(ie,q,2,2)*gradphi[q][2][b]) ) * w[q];
                    aloc[a][2][b][2] += ( gradphi[q][0][a]*(PRESTRESS_NP1(ie,q,0,0)*gradphi[q][0][b] + PRESTRESS_NP1(ie,q,1,0)*gradphi[q][1][b] + PRESTRESS_NP1(ie,q,2,0)*gradphi[q][2][b]) + gradphi[q][1][a]*(PRESTRESS_NP1(ie,q,0,1)*gradphi[q][0][b] + PRESTRESS_NP1(ie,q,1,1)*gradphi[q][1][b] + PRESTRESS_NP1(ie,q,2,1)*gradphi[q][2][b]) + gradphi[q][2][a]*(PRESTRESS_NP1(ie,q,0,2)*gradphi[q][0][b] + PRESTRESS_NP1(ie,q,1,2)*gradphi[q][1][b] + PRESTRESS_NP1(ie,q,2,2)*gradphi[q][2][b]) ) * w[q];
                }
            }
        }
        /* Scatter local matrix into the global COO triplets (1-based rows/cols) */
        for (a = 0; a < nln; a = a + 1 )
        {
            for (i_c = 0; i_c < 3; i_c = i_c + 1 )
            {
                for (b = 0; b < nln; b = b + 1 )
                {
                    for (j_c = 0; j_c < 3; j_c = j_c + 1 )
                    {
                        myArows[ie*nln2*9+iii] = elements[a+ie*numRowsElements] + i_c * NumNodes;
                        myAcols[ie*nln2*9+iii] = elements[b+ie*numRowsElements] + j_c * NumNodes;
                        myAcoef[ie*nln2*9+iii] = aloc[a][i_c][b][j_c]*detjac[ie];
                        iii = iii + 1;
                    }
                }
            }
        }

        /* Assemble Residual: int grad(phi_a) : S  per component */
        iii = 0;
        double rloc[nln][dim];
        for (a = 0; a < nln; a = a + 1 )
        {
            for (i_c = 0; i_c < 3; i_c = i_c + 1 )
            {
                rloc[a][i_c] = 0.0;
            }
        }
        for (q = 0; q < NumQuadPoints; q = q + 1 )
        {
            for (a = 0; a < nln; a = a + 1 )
            {
                for (i_c = 0; i_c < 3; i_c = i_c + 1 )
                {
                    rloc[a][i_c] += ( gradphi[q][0][a] * PRESTRESS_NP1(ie,q,i_c,0) + gradphi[q][1][a] * PRESTRESS_NP1(ie,q,i_c,1) + gradphi[q][2][a] * PRESTRESS_NP1(ie,q,i_c,2) ) * w[q];
                }
            }
        }
        for (a = 0; a < nln; a = a + 1 )
        {
            for (i_c = 0; i_c < 3; i_c = i_c + 1 )
            {
                myRrows[ie*nln*3+iii] = elements[a+ie*numRowsElements] + i_c * NumNodes;
                myRcoef[ie*nln*3+iii] = rloc[a][i_c]*detjac[ie];
                iii = iii + 1;
            }
        }
    }
}
/*************************************************************************/
/*
 * NeoHookeanMaterial_stress: MEX kernel that evaluates, per element, the first
 * Piola-Kirchhoff stress P and the Cauchy stress Sigma = (1/detF) P F^T of the
 * Neo-Hookean material at the FIRST quadrature point only (q = 0).
 *
 * prhs layout matches NeoHookeanMaterial_prestress (0=dim, 2=[Young,Poisson],
 * 3=U_h, 4=elements, 5=nln, 6=weights, 7=invjac, 8=detjac, 9=phi,
 * 10=gradrefphi). plhs[0]=P (noe x dim*dim), plhs[1]=Sigma (noe x dim*dim).
 *
 * Fixes vs. previous revision: removed duplicated computations of detF,
 * invFT and logdetF (they were evaluated twice with identical inputs),
 * and removed unused locals (l, nln2, traceE, P1) together with their
 * stale entries in the OpenMP data-sharing clauses.
 */
void NeoHookeanMaterial_stress(mxArray* plhs[], const mxArray* prhs[])
{
    double* dim_ptr = mxGetPr(prhs[0]);
    int dim = (int)(dim_ptr[0]);
    int noe = mxGetN(prhs[4]);                /* number of elements */
    double* nln_ptr = mxGetPr(prhs[5]);
    int nln = (int)(nln_ptr[0]);              /* local nodes per element */
    int numRowsElements = mxGetM(prhs[4]);

    plhs[0] = mxCreateDoubleMatrix(noe,dim*dim, mxREAL);
    plhs[1] = mxCreateDoubleMatrix(noe,dim*dim, mxREAL);
    double* P = mxGetPr(plhs[0]);
    double* Sigma = mxGetPr(plhs[1]);

    int k;
    int q;
    int NumQuadPoints = mxGetN(prhs[6]);
    int NumNodes = (int)(mxGetM(prhs[3]) / dim);
    double* U_h = mxGetPr(prhs[3]);
    double* w = mxGetPr(prhs[6]);             /* fetched for API symmetry; not used (no quadrature sum here) */
    double* invjac = mxGetPr(prhs[7]);
    double* detjac = mxGetPr(prhs[8]);
    double* phi = mxGetPr(prhs[9]);
    double* gradrefphi = mxGetPr(prhs[10]);
    double gradphi[dim][nln][NumQuadPoints];
    double* elements = mxGetPr(prhs[4]);
    double GradUh[dim][dim][NumQuadPoints];

    /* dim x dim identity, used to build F = I + Grad(U_h) */
    double Id[dim][dim];
    int d1,d2;
    for (d1 = 0; d1 < dim; d1 = d1 + 1 )
    {
        for (d2 = 0; d2 < dim; d2 = d2 + 1 )
        {
            Id[d1][d2] = 0;
            if (d1==d2)
            {
                Id[d1][d2] = 1;
            }
        }
    }

    /* Material parameters; note this function uses the integer-literal form
       (2 + 2*Poisson) while the _prestress variant uses 2.0 — numerically
       identical here because Poisson is a double */
    double* material_param = mxGetPr(prhs[2]);
    double Young = material_param[0];
    double Poisson = material_param[1];
    double mu = Young / (2 + 2 * Poisson);
    double lambda = Young * Poisson /( (1+Poisson) * (1-2*Poisson) );
    double bulk = ( 2.0 / 3.0 ) * mu + lambda;

    /* Assembly: loop over the elements */
    int ie;
    #pragma omp parallel for shared(invjac,detjac,elements,P,Sigma,U_h) private(gradphi,GradUh,ie,k,q,d1,d2) firstprivate(phi,gradrefphi,w,numRowsElements,nln,NumNodes,Id,mu,bulk)
    for (ie = 0; ie < noe; ie = ie + 1 )
    {
        double F[NumQuadPoints][dim][dim];
        double P_Uh[dim][dim];
        double invFT[NumQuadPoints][dim][dim];
        double detF[NumQuadPoints];
        double logdetF[NumQuadPoints];
        double pow2detF[NumQuadPoints];
        double pow23detF[NumQuadPoints];
        double C[NumQuadPoints][dim][dim];
        double I_C[NumQuadPoints];

        q = 0;  /* stress is reported at the first quadrature point only */

        /* Physical gradients of basis functions: invjac * grad(ref phi) */
        for (k = 0; k < nln; k = k + 1 )
        {
            for (d1 = 0; d1 < dim; d1 = d1 + 1 )
            {
                gradphi[d1][k][q] = 0;
                for (d2 = 0; d2 < dim; d2 = d2 + 1 )
                {
                    gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
                }
            }
        }
        /* Displacement gradient and deformation gradient F = I + GradUh */
        for (d1 = 0; d1 < dim; d1 = d1 + 1 )
        {
            for (d2 = 0; d2 < dim; d2 = d2 + 1 )
            {
                GradUh[d1][d2][q] = 0;
                for (k = 0; k < nln; k = k + 1 )
                {
                    int e_k;
                    /* elements[] is 1-based (MATLAB), hence the -1 */
                    e_k = (int)(elements[ie*numRowsElements + k] + d1*NumNodes - 1);
                    GradUh[d1][d2][q] = GradUh[d1][d2][q] + U_h[e_k] * gradphi[d2][k][q];
                }
                F[q][d1][d2] = Id[d1][d2] + GradUh[d1][d2][q];
            }
        }
        /* Kinematic invariants (each computed once) */
        detF[q] = MatrixDeterminant(dim, F[q]);
        MatrixInvT(dim, F[q], invFT[q] );
        MatrixProductAlphaT1(dim, 1.0, F[q], F[q], C[q] );   /* C = F^T F */
        logdetF[q] = log( detF[q] );
        pow23detF[q] = pow(detF[q], -2.0 / 3.0);
        pow2detF[q] = pow(detF[q], 2.0);
        I_C[q] = Trace(dim, C[q]);

        /* First Piola-Kirchhoff stress: isochoric + volumetric parts */
        for (d1 = 0; d1 < dim; d1 = d1 + 1 )
        {
            for (d2 = 0; d2 < dim; d2 = d2 + 1 )
            {
                P_Uh[d1][d2] = mu * pow23detF[q] * ( F[q][d1][d2] - 1.0 / 3.0 * I_C[q] * invFT[q][d1][d2] )
                             + 1.0 / 2.0 * bulk * ( pow2detF[q] - detF[q] + logdetF[q] ) * invFT[q][d1][d2];
            }
        }
        for (d1 = 0; d1 < dim; d1 = d1 + 1 )
        {
            for (d2 = 0; d2 < dim; d2 = d2 + 1 )
            {
                P[ie+(d1+d2*dim)*noe] = P_Uh[d1][d2] ;
            }
        }

        /* Cauchy stress: Sigma = 1 / det(F) * P * F^T */
        double Sigma_tmp[dim][dim];
        MatrixProductAlphaT2(dim, 1.0 / detF[q], P_Uh, F[q], Sigma_tmp );
        for (d1 = 0; d1 < dim; d1 = d1 + 1 )
        {
            for (d2 = 0; d2 < dim; d2 = d2 + 1 )
            {
                Sigma[ie+(d1+d2*dim)*noe] = Sigma_tmp[d1][d2] ;
            }
        }
    }
}
/*************************************************************************/ |
dahua_fmt_plug.c | /*
* Format for cracking Dahua hashes.
*
* http://www.securityfocus.com/archive/1/529799
* https://github.com/depthsecurity/dahua_dvr_auth_bypass
*
* This software is Copyright (c) 2014 Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
 * Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_dahua;
#elif FMT_REGISTERS_H
john_register_one(&fmt_dahua);
#else
#include <string.h>
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 512
#else
#define OMP_SCALE 32768 // tuned K8-dual HT
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#include <ctype.h>
#define FORMAT_LABEL "dahua"
#define FORMAT_NAME "\"MD5 based authentication\" Dahua"
#define FORMAT_TAG "$dahua$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 8
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test/benchmark vectors: "$dahua$" + 8-char compressed-MD5 digest */
static struct fmt_tests tests[] = {
	{"$dahua$4WzwxXxM", "888888"}, // from hashcat.net
	{"$dahua$HRG6OLE6", "Do You Even Lift?"},
	{"$dahua$sh15yfFM", "666666"},
	{"$dahua$6QNMIQGe", "admin"},
	{"$dahua$g2UpKxOg", "passWOrd"},
	{"$dahua$tlJwpbo6", ""},
	{NULL}
};
/* Per-candidate buffers, allocated in init() and released in done() */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];   /* candidate plaintexts */
static int *saved_len;                            /* their lengths */
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; /* 8-byte digests */
/* Format initialization: size the key/digest arrays, scaling the per-crypt
 * batch by the OpenMP thread count when built with OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}
/* Validate a candidate hash line: "$dahua$" tag followed by exactly
 * BINARY_SIZE (8) alphanumeric characters.
 * Returns 1 if the line is a well-formed dahua hash, 0 otherwise.
 * Fix: removed the dead 'if (!p)' check — p = p + TAG_LENGTH is pointer
 * arithmetic on a non-null pointer and can never be NULL. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;
	int i;

	if (strncmp(p, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;
	p = p + TAG_LENGTH;
	if (strlen(p) != BINARY_SIZE)
		return 0;
	for (i = 0; i < BINARY_SIZE; i++)
		if (!isalnum((int)(unsigned char)p[i]))
			return 0;
	return 1;
}
/* Extract the 8-byte "binary" (the compressed digest) from the hash line.
 * Returns a pointer to a static, word-aligned buffer — callers must copy
 * before the next call. The strncpy intentionally copies exactly
 * BINARY_SIZE bytes with no NUL terminator: valid() guarantees the field
 * is exactly 8 alphanumeric characters. */
static void *get_binary(char *ciphertext)
{
	static union {
		char c[BINARY_SIZE];
		ARCH_WORD dummy;   /* forces word alignment of c[] */
	} buf;
	char *p;
	char *out = buf.c;

	p = strrchr(ciphertext, '$') + 1;   /* skip past "$dahua$" */
	strncpy(out, p, BINARY_SIZE);
	return out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
// from hashcat.net (alxchk)
/* Dahua's digest "compression": fold the 16-byte MD5 into 8 printable
 * characters. Each output char is (in[2j] + in[2j+1]) mod 62 mapped onto
 * the base-62 alphabet 0-9, A-Z, a-z (same mapping as the original
 * +48/+55/+61 branch ladder). From hashcat.net (alxchk). */
static void compressor(unsigned char *in, unsigned char *out)
{
	static const char base62[] =
		"0123456789"
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
		"abcdefghijklmnopqrstuvwxyz";
	int j;

	for (j = 0; j < 8; j++) {
		unsigned int v = ((unsigned int)in[2 * j] + in[2 * j + 1]) % 62;

		out[j] = (unsigned char)base62[v];
	}
}
/* Compute compressor(md5(password)) for every queued candidate.
 * Fix: the for-loop was inside '#ifdef _OPENMP', so non-OpenMP builds ran
 * the body exactly once (index 0). That only worked because
 * MAX_KEYS_PER_CRYPT is 1 without OpenMP; hoisting the loop out of the
 * ifdef makes the function correct for any count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		// hash is compressor(md5(password))
		MD5_CTX ctx;
		unsigned char *out = (unsigned char*)crypt_out[index];
		unsigned char hash[16];

		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], saved_len[index]);
		MD5_Final(hash, &ctx);
		compressor(hash, out);
	}
	return count;
}
/* Quick scan: does any computed digest match the target binary?
 * Compares only ARCH_SIZE bytes as a fast filter; cmp_one() does the full
 * BINARY_SIZE comparison. Fix: the loop was guarded by '#ifdef _OPENMP'
 * so non-OpenMP builds checked only index 0 — safe only because count was
 * always 1 there; the loop now runs unconditionally. */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full BINARY_SIZE comparison of one computed digest with the target. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* Nothing beyond cmp_one() to verify (the binary is the full digest),
 * so an exact match is already established. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store one candidate password; strnzcpyn bounds the copy by the buffer
 * size and returns the stored length, cached to avoid strlen in crypt_all. */
static void dahua_set_key(char *key, int index)
{
	saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
/* Return the candidate password stored at the given index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format registration: wires the dahua callbacks into the JtR core. */
struct fmt_main fmt_dahua = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		/* FMT_OMP_BAD: OpenMP scaling of this format is known to be poor */
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,   /* unsalted format: SALT_SIZE is 0 */
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		dahua_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif
|
lotus85_fmt_plug.c | /*
* This software is Copyright (c) 2013 Sébastien Kaczmarek <skaczmarek@quarkslab.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Fixed the format to crack multiple hashes + added OMP support (Dhiru
* Kholia).
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_lotus_85;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lotus_85);
#else
#include <stdio.h>
#include <string.h>
#include "stdint.h"
#include "sha.h"
#include <openssl/rc2.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64 // XXX tune me!
#endif
static int omp_t = 1;
#endif
#include "formats.h"
#include "common.h"
#include "memdbg.h"
/* Plugin definition */
#define FORMAT_LABEL "lotus85"
#define FORMAT_NAME "Lotus Notes/Domino 8.5"
#define ALGORITHM_NAME "8/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 0x64
#define BINARY_SIZE 0
#define BINARY_LENGTH 5
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
// #define MAX_KEYS_PER_CRYPT 0x900 // WTF?
#define MAX_KEYS_PER_CRYPT 1
#define LOTUS85_MAX_BLOB_SIZE 0x64
#define LOTUS85_MIN_BLOB_SIZE 40 // XXX fictional value, but isn't this length fixed?
/* Globals */
static const char LOTUS85_UNIQUE_STRING[] = "Lotus Notes Password Pad Uniquifier";
/* 256-entry substitution table used by the Lotus/Domino proprietary hash
 * (a fixed permutation of 0..255; shared by all the mixing functions below). */
static uint8_t ebits_to_num[256]=
{
	0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
	0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
	0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
	0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
	0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
	0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36,
	0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8,
	0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c,
	0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17,
	0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60,
	0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72,
	0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa,
	0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd,
	0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e,
	0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b,
	0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf,
	0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77,
	0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6,
	0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3,
	0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3,
	0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e,
	0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c,
	0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d,
	0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2,
	0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46,
	0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5,
	0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97,
	0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5,
	0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef,
	0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f,
	0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf,
	0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab,
};
/* The "salt" is the encrypted user blob extracted from the user.id file. */
static struct custom_salt {
	uint8_t lotus85_user_blob[LOTUS85_MAX_BLOB_SIZE]; /* raw encrypted blob */
	uint32_t lotus85_user_blob_len;                   /* bytes actually used */
} *cur_salt;
/*
 * 5 bytes digest computed by the algorithm
 * As the password is used to derive a RC2 key and decipher the user blob
 * the reference digest is always different and we should track them all
 */
static uint8_t (*lotus85_last_binary_hash1)[BINARY_LENGTH]; /* digest stored in the blob */
static uint8_t (*lotus85_last_binary_hash2)[BINARY_LENGTH]; /* digest recomputed from the blob */
/* Plaintext passwords history requested by JtR engine */
static char (*lotus85_saved_passwords)[PLAINTEXT_LENGTH+1];
/* Decipher user.id user blob */
/* Decipher user.id user blob: RC2-CBC decrypt 'len' bytes with an 8-byte
 * key (64 effective bits) and an all-zero IV, into deciphered_blob.
 * The oversized local buffer absorbs RC2's whole-block writes when len is
 * not a multiple of the 8-byte block size. */
static void decipher_userid_blob(uint8_t *ciphered_blob, uint32_t len, uint8_t *userid_key, uint8_t *deciphered_blob)
{
	RC2_KEY rc_key;
	uint8_t buf[LOTUS85_MAX_BLOB_SIZE+8],rc_iv[8];

	memset(buf, 0x0, sizeof(buf));
	memset(rc_iv, 0, sizeof(rc_iv));
	RC2_set_key(&rc_key, 8, userid_key, 64);
	RC2_cbc_encrypt(ciphered_blob, buf, len, &rc_key, rc_iv, RC2_DECRYPT);
	memcpy(deciphered_blob, buf, len);
}
/* Custom hash transformation function */
/* Custom hash transformation function (one compression round of the
 * proprietary Lotus/Domino password hash).
 * data:  16-byte input block; state: 16-byte chaining state (updated);
 * out:   16-byte running digest (updated).
 * The statement order and byte-wise feedback below are part of the
 * algorithm — do not reorder. */
static void custom_password_hash_trans(uint8_t *data, uint8_t *out, uint8_t *state)
{
	uint8_t buffer[48];
	size_t i, j;
	uint8_t c;

	/* buffer = state || data || (data XOR state) */
	memset(buffer, 0, sizeof(buffer));
	memcpy(buffer, state, 16);
	memcpy(buffer + 16, data, 16);
	for(i=0;i<16;i+=4)
	{
		buffer[32+i] = data[i] ^ state[i];
		buffer[32+i+1] = data[i+1] ^ state[i+1];
		buffer[32+i+2] = data[i+2] ^ state[i+2];
		buffer[32+i+3] = data[i+3] ^ state[i+3];
	}
	/* 18 S-box mixing passes over the 48-byte buffer, 6 bytes at a time,
	 * carrying the last byte of each group into the next */
	for(j=c=0;j<18;j++)
	{
		for(i=0;i<sizeof(buffer);i+=6)
		{
			buffer[i] ^= ebits_to_num[(c-i+48) & 0xFF];
			buffer[i+1] ^= ebits_to_num[(buffer[i]-i+47) & 0xFF];
			buffer[i+2] ^= ebits_to_num[(buffer[i+1]-i+46) & 0xFF];
			buffer[i+3] ^= ebits_to_num[(buffer[i+2]-i+45) & 0xFF];
			buffer[i+4] ^= ebits_to_num[(buffer[i+3]-i+44) & 0xFF];
			buffer[i+5] ^= ebits_to_num[(buffer[i+4]-i+43) & 0xFF];
			c = buffer[i+5];
		}
	}
	/* new chaining state is the first 16 mixed bytes */
	memcpy(state, buffer, 16);
	/* fold the data block into the running digest with S-box feedback */
	c = out[15];
	for(i=0;i<16;i+=4)
	{
		out[i] ^= ebits_to_num[data[i] ^ c];
		out[i+1] ^= ebits_to_num[data[i+1] ^ out[i]];
		out[i+2] ^= ebits_to_num[data[i+2] ^ out[i+1]];
		out[i+3] ^= ebits_to_num[data[i+3] ^ out[i+2]];
		c = out[i+3];
	}
}
/* Custom hash function */
/* Custom hash function: hash 'password' into a 16-byte digest 'out' using
 * the proprietary block transform above, processing 16 bytes at a time
 * with PKCS#7-style padding (pad byte == number of pad bytes; a full pad
 * block of 16s when the length is a multiple of 16), then one final
 * transform of the chaining state.
 * Fix: the full-pad branch used memset(block1, sizeof(block1), sizeof(block1)),
 * passing sizeof as the FILL VALUE — it happens to equal the intended pad
 * byte 16, but is flagged by -Wmemset-transposed-args; the value is now
 * written explicitly. Behavior is unchanged. */
static void custom_password_hash(const char *password, uint8_t *out)
{
	uint8_t block1[16], state[16], block2[16];
	size_t len, rlen, block_pos = 0;

	len = strlen(password);
	memset(state, 0, sizeof(state));
	memset(block2, 0, sizeof(block2));

	/* full 16-byte blocks */
	while((block_pos + 15) < len)
	{
		memcpy(block1, password+block_pos, sizeof(block1));
		custom_password_hash_trans(block1, state, block2);
		block_pos += 16;
	}
	if(block_pos != len)
	{
		/* partial block: pad with the pad-length byte */
		rlen = len - block_pos;
		memcpy(block1, password+block_pos, rlen);
		memset(block1+rlen, 16-rlen, 16-rlen);
		custom_password_hash_trans(block1, state, block2);
	}
	else
	{
		/* length is a multiple of 16: one extra block of 0x10 bytes */
		memset(block1, 16, sizeof(block1));
		custom_password_hash_trans(block1, state, block2);
	}
	/* final strengthening pass over the chaining state */
	custom_password_hash_trans(state, state, block2);
	memcpy(out, block2, sizeof(block2));
}
/* Hash cste::password with sha1 */
/* SHA-1 of the fixed uniquifier string concatenated with the password.
 * 'hash' must have room for SHA_DIGEST_LENGTH (20) bytes; the caller
 * passes the tail of its 16+20-byte key buffer. */
static void password_hash(const char *password, uint8_t *hash)
{
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, LOTUS85_UNIQUE_STRING, strlen(LOTUS85_UNIQUE_STRING));
	SHA1_Update(&ctx, password, strlen(password));
	SHA1_Final(hash, &ctx);
}
/* Hash/checksum function used for key derivation from plaintext password */
/* Hash/checksum function used for key derivation from plaintext password.
 * Shifts the mac window left and feeds in S-box-mixed key bytes.
 * NOTE(review): only the first 16 bytes of 'key' are consumed and the
 * 'len' parameter is unused — presumably matching the original Notes
 * algorithm; confirm before changing. */
static void compute_key_mac(uint8_t *key, size_t len, uint8_t *mac, size_t mac_len)
{
	size_t i, j, mlen=mac_len-1;
	uint8_t k;

	for(i=0;i<16;i++)
	{
		k = ebits_to_num[mac[0] ^ mac[1]];
		/* shift mac left by one byte */
		for(j=0;j<mlen;j++)
		{
			mac[j] = mac[j+1];
		}
		mac[mlen] = key[i] ^ k;
	}
}
/* Hash/checksum function used for digest storage */
/* Hash/checksum function used for digest storage: folds 'len' message
 * bytes into the 5-byte msg_mac using the S-box, cycling the write index
 * over positions 0..4, then rotates the mac left by one byte. */
static void compute_msg_mac(uint8_t *msg, size_t len, uint8_t *msg_mac)
{
	size_t i, j;
	uint8_t c;

	for(i=j=0;i<len;i++)
	{
		if(j!=4)
		{
			msg_mac[j] = msg[i] ^ ebits_to_num[msg_mac[j] ^ msg_mac[j+1]];
			j++;
		}
		else
		{
			/* last position wraps: mix with msg_mac[0] and restart */
			msg_mac[j] = msg[i] ^ ebits_to_num[msg_mac[j] ^ msg_mac[0]];
			j = 0;
		}
	}
	/* rotate the 5-byte mac left by one */
	c = msg_mac[0];
	for(i=0;i<4;i++)
	{
		msg_mac[i] = msg_mac[i+1];
	}
	msg_mac[i] = c;
}
/*
* Derive password to retrieve the RC2 secret key
* used when deciphering user blob stored in user.id file
*/
/* Derive the 8-byte RC2 secret key for user.id decryption:
 * key = custom_hash(password) || sha1(uniquifier || password),
 * then reduce through compute_key_mac into 'secret_key' (8 bytes). */
static void get_user_id_secret_key(const char *password, uint8_t *secret_key)
{
	uint8_t key[16+20], mac[8];

	memset(key, 0, sizeof(key));
	memset(mac, 0, sizeof(mac));
	custom_password_hash(password, key);
	password_hash(password, key+16);
	compute_key_mac(key, sizeof(key), mac, sizeof(mac));
	memcpy(secret_key, mac, sizeof(mac));
}
/* Plugin initialization */
/* Plugin initialization: scale batch size by thread count under OpenMP
 * and allocate the password/digest arrays (released in done()). */
static void lotus85_init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	lotus85_saved_passwords = mem_calloc(self->params.max_keys_per_crypt,
			PLAINTEXT_LENGTH + 1);
	lotus85_last_binary_hash1 = mem_calloc(self->params.max_keys_per_crypt,
			BINARY_LENGTH);
	lotus85_last_binary_hash2 = mem_calloc(self->params.max_keys_per_crypt,
			BINARY_LENGTH);
}
/* Release the buffers allocated in lotus85_init(), in reverse order. */
static void done(void)
{
	MEM_FREE(lotus85_last_binary_hash2);
	MEM_FREE(lotus85_last_binary_hash1);
	MEM_FREE(lotus85_saved_passwords);
}
/* Check if given ciphertext (hash) format is valid */
/* Check if given ciphertext (hash) format is valid: an even-length hex
 * string whose decoded size lies within the allowed blob range. */
static int lotus85_valid(char *ciphertext,struct fmt_main *self)
{
	int len = strlen(ciphertext);
	int blob_len = len / 2;

	if (len & 1)
		return 0;
	if (blob_len > LOTUS85_MAX_BLOB_SIZE || blob_len < LOTUS85_MIN_BLOB_SIZE)
		return 0;
	/* every character must be a hex digit */
	return hexlenu(ciphertext) == len;
}
/* Decode the hex ciphertext into the binary user blob.
 * Returns a pointer to a static salt struct — the caller (JtR core)
 * copies SALT_SIZE bytes out before the next call. */
static void *get_salt(char *ciphertext)
{
	int i,len;
	static struct custom_salt cs;

	len = strlen(ciphertext) >> 1;   /* two hex chars per byte */
	for (i = 0; i < len; i++)
		cs.lotus85_user_blob[i] = (atoi16[ARCH_INDEX(ciphertext[i << 1])] << 4) + atoi16[ARCH_INDEX(ciphertext[(i << 1) + 1])];
	cs.lotus85_user_blob_len = len;
	return (void*)&cs;
}
/* Make the given salt (user blob) current for subsequent crypt_all calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Set password at given index */
/* Set password at given index.
 * Fix: the copy was bounded by strlen(key)+1, i.e. by the SOURCE length —
 * a candidate longer than PLAINTEXT_LENGTH would overflow the 33-byte
 * destination slot. Bound by the destination buffer size instead
 * (strnzcpy truncates and NUL-terminates). */
static void lotus85_set_key(char *key,int index)
{
	strnzcpy(lotus85_saved_passwords[index], key,
	         sizeof(lotus85_saved_passwords[index]));
}
/* Return password at given index as string */
/* Return password at given index as string */
static char *lotus85_get_key(int index)
{
	return lotus85_saved_passwords[index];
}
/* Main callback to compute lotus digest */
static int lotus85_crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

	/* Compute digest for all given plaintext passwords.
	 * A guess is correct when the 5-byte digest stored at the end of the
	 * deciphered blob (hash1) matches the digest recomputed over the rest
	 * of the blob (hash2) — see lotus85_cmp_one(). */
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		unsigned char user_key[8], deciphered_userid[LOTUS85_MAX_BLOB_SIZE];

		memset(lotus85_last_binary_hash1[index], 0, BINARY_LENGTH);
		memset(lotus85_last_binary_hash2[index], 0, BINARY_LENGTH);
		memset(user_key, 0, sizeof(user_key));
		memset(deciphered_userid, 0, sizeof(deciphered_userid));

		/* Derive password and retrieve RC2 key */
		get_user_id_secret_key(lotus85_saved_passwords[index], user_key);

		/* Deciphered user blob stored in user.id file */
		decipher_userid_blob(cur_salt->lotus85_user_blob, cur_salt->lotus85_user_blob_len, user_key, deciphered_userid);

		/* Store first deciphered digest (trailing BINARY_LENGTH bytes) */
		memcpy(lotus85_last_binary_hash1[index], deciphered_userid + cur_salt->lotus85_user_blob_len - BINARY_LENGTH, BINARY_LENGTH);

		/* Compute digest of deciphered message */
		compute_msg_mac(deciphered_userid, cur_salt->lotus85_user_blob_len - BINARY_LENGTH, lotus85_last_binary_hash2[index]);
	}
	return count;
}
/* Check if one of last computed hashs match */
/* Check if one of last computed hashs match.
 * 'binary' is unused by design (BINARY_SIZE is 0): correctness is
 * established by comparing the stored digest against the recomputed one. */
static int lotus85_cmp_all(void *binary,int count)
{
	int i;

	for(i = 0; i < count; i++)
	{
		if(!memcmp(lotus85_last_binary_hash1[i],lotus85_last_binary_hash2[i],BINARY_LENGTH))
			return 1;
	}
	return 0;
}
/* Check if last computed hash match */
/* Check if last computed hash match (stored digest == recomputed digest). */
static int lotus85_cmp_one(void *binary,int index)
{
	return !memcmp(lotus85_last_binary_hash1[index],lotus85_last_binary_hash2[index],BINARY_LENGTH);
}
/* No ASCII ciphertext, thus returns true */
/* No ASCII ciphertext, thus returns true */
static int lotus85_cmp_exact(char *source,int index)
{
	return 1;
}
/* Self-test vectors: hex-encoded user.id blobs with their passwords. */
static struct fmt_tests lotus85_tests[] =
{
	{"0040B2B17C344C236953F955B28E4865014034D1F664489D7F42B35FB6928A94DCFFEF7750CE029F94C83A582A80B4662D49B3FA45816143", "notesisterrible"},
	{"CBCFC612FAE3154316223787C7CD29AD39BEDF4288FCDE310B32FD809C75F5FDC521667D5F6E7A047766F0E60952F7891593FFAF45AD0C15", "openwall"},
	{NULL}
};
/* JtR lotus 8.5 structure registration */
/* JtR lotus 8.5 structure registration */
struct fmt_main fmt_lotus_85 =
{
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		lotus85_tests
	}, {
		lotus85_init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		lotus85_valid,
		fmt_default_split,
		fmt_default_binary,   /* BINARY_SIZE is 0: no stored binary */
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		lotus85_set_key, /* Set plaintext password */
		lotus85_get_key, /* Get plaintext password */
		fmt_default_clear_keys,
		lotus85_crypt_all, /* Main hash function */
		{
			fmt_default_get_hash
		},
		lotus85_cmp_all, /* Compare * hash (binary) */
		lotus85_cmp_one, /* Compare 1 hash (binary) */
		lotus85_cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unop__identity_fc64_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_bool)
// op(A') function: GB (_unop_tran__identity_fc64_bool)
// C type: GxB_FC64_t
// A type: bool
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): cast each bool entry of A to GxB_FC64_t (identity op).
 * Auto-generated kernel — keep edits to comments only. */
GrB_Info GB (_unop_apply__identity_fc64_bool)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose, typecast bool -> GxB_FC64_t.
 * The loop body is supplied by the shared template GB_unop_transpose.c,
 * which uses the GB_CAST_OP macro defined above. Auto-generated kernel. */
GrB_Info GB (_unop_tran__identity_fc64_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
pixel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP IIIII X X EEEEE L %
% P P I X X E L %
% PPPP I X EEE L %
% P I X X E L %
% P IIIII X X EEEEE LLLLL %
% %
% MagickCore Methods to Import/Export Pixels %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C h a n n e l M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelChannelMap() acquires a pixel component map.
%
% The format of the AcquirePixelChannelMap() method is:
%
% PixelChannelMap *AcquirePixelChannelMap(void)
%
*/
/*
  Allocate a zero-initialized pixel channel map with one slot per possible
  pixel channel; each slot's channel id is preset to its own index.
  Allocation failure is fatal.
*/
MagickExport PixelChannelMap *AcquirePixelChannelMap(void)
{
  PixelChannelMap
    *map;

  ssize_t
    slot;

  map=(PixelChannelMap *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*map));
  if (map == (PixelChannelMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(map,0,MaxPixelChannels*sizeof(*map));
  for (slot=0; slot < MaxPixelChannels; slot++)
    map[slot].channel=(PixelChannel) slot;
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C h a n n e l M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelChannelMap() clones a pixel component map.
%
% The format of the ClonePixelChannelMap() method is:
%
% PixelChannelMap *ClonePixelChannelMap(PixelChannelMap *channel_map)
%
% A description of each parameter follows:
%
% o channel_map: the pixel component map.
%
*/
/*
  Duplicate a pixel channel map: acquire a fresh map, then copy the source's
  MaxPixelChannels entries into it.  Returns NULL if the acquire fails.
*/
MagickExport PixelChannelMap *ClonePixelChannelMap(PixelChannelMap *channel_map)
{
  PixelChannelMap
    *duplicate;

  assert(channel_map != (PixelChannelMap *) NULL);
  duplicate=AcquirePixelChannelMap();
  if (duplicate == (PixelChannelMap *) NULL)
    return((PixelChannelMap *) NULL);
  (void) memcpy(duplicate,channel_map,MaxPixelChannels*sizeof(*channel_map));
  return(duplicate);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelInfo() makes a duplicate of the given pixel info structure, or if
% pixel info is NULL, a new one.
%
% The format of the ClonePixelInfo method is:
%
% PixelInfo *ClonePixelInfo(const PixelInfo *pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel info.
%
*/
/*
  Heap-allocate a PixelInfo and value-copy the given pixel into it.
  Allocation failure is fatal.
*/
MagickExport PixelInfo *ClonePixelInfo(const PixelInfo *pixel)
{
  PixelInfo
    *duplicate;

  duplicate=(PixelInfo *) AcquireMagickMemory(sizeof(*duplicate));
  if (duplicate == (PixelInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *duplicate=(*pixel);
  return(duplicate);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n f o r m P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConformPixelInfo() ensures the pixel conforms with the colorspace and alpha
% attribute of the image.
%
% The format of the ConformPixelInfo method is:
%
%      void ConformPixelInfo(Image *image,const PixelInfo *source,
%        PixelInfo *destination,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source pixel info.
%
% o destination: the destination pixel info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Copy 'source' into 'destination' and make it conform to the image's
  colorspace and alpha attributes.  NOTE(review): this also mutates 'image'
  (colorspace transform / alpha set) when the background color or alpha
  trait disagrees with the destination pixel.
*/
MagickExport void ConformPixelInfo(Image *image,const PixelInfo *source,
  PixelInfo *destination,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(destination != (const PixelInfo *) NULL);
  *destination=(*source);
  /* Match the pixel's colorspace to the image: RGB<->CMYK as needed. */
  if (image->colorspace == CMYKColorspace)
    {
      if (IssRGBCompatibleColorspace(destination->colorspace) != MagickFalse)
        ConvertRGBToCMYK(destination);
    }
  else
    if (destination->colorspace == CMYKColorspace)
      {
        if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
          ConvertCMYKToRGB(destination);
      }
  /* A non-gray background on a grayscale image forces the image to sRGB. */
  if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /* If the pixel carries alpha but the image does not, enable image alpha. */
  if ((destination->alpha_trait != UndefinedPixelTrait) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(image,OpaqueAlpha,exception);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e c o d e P i x e l G a m m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DecodePixelGamma() applies the expansive power-law nonlinearity to the pixel.
%
% The format of the DecodePixelGamma method is:
%
%      MagickRealType DecodePixelGamma(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
/*
  Evaluate pow(x,7./5.) via a Chebyshev polynomial expansion, so the caller
  can form x^2.4 as x*x^(7/5) without calling pow().  x is split by frexp()
  into mantissa and exponent; the series handles the mantissa and the
  powers_of_two table handles the exponent (reduced mod 5).
*/
static inline double DecodeGamma(const double x)
{
  div_t
    quotient;

  double
    p,
    term[9];

  int
    exponent;

  static const double coefficient[] =  /* terms for x^(7/5), x=1.5 */
  {
    1.7917488588043277509,
    0.82045614371976854984,
    0.027694100686325412819,
    -0.00094244335181762134018,
    0.000064355540911469709545,
    -5.7224404636060757485e-06,
    5.8767669437311184313e-07,
    -6.6139920053589721168e-08,
    7.9323242696227458163e-09
  };

  static const double powers_of_two[] =  /* (2^x)^(7/5) */
  {
    1.0,
    2.6390158215457883983,
    6.9644045063689921093,
    1.8379173679952558018e+01,
    4.8502930128332728543e+01
  };

  /*
    Compute x^2.4 == x*x^(7/5) == pow(x,2.4).
  */
  /* Map the frexp() mantissa [0.5,1) onto [-1,1) for the Chebyshev basis. */
  term[0]=1.0;
  term[1]=4.0*frexp(x,&exponent)-3.0;
  /* Chebyshev recurrence: T[n+1] = 2*t*T[n]-T[n-1]. */
  term[2]=2.0*term[1]*term[1]-term[0];
  term[3]=2.0*term[1]*term[2]-term[1];
  term[4]=2.0*term[1]*term[3]-term[2];
  term[5]=2.0*term[1]*term[4]-term[3];
  term[6]=2.0*term[1]*term[5]-term[4];
  term[7]=2.0*term[1]*term[6]-term[5];
  term[8]=2.0*term[1]*term[7]-term[6];
  p=coefficient[0]*term[0]+coefficient[1]*term[1]+coefficient[2]*term[2]+
    coefficient[3]*term[3]+coefficient[4]*term[4]+coefficient[5]*term[5]+
    coefficient[6]*term[6]+coefficient[7]*term[7]+coefficient[8]*term[8];
  /* Reduce the binary exponent mod 5 (floor semantics for negatives). */
  quotient=div(exponent-1,5);
  if (quotient.rem < 0)
    {
      quotient.quot-=1;
      quotient.rem+=5;
    }
  return(x*ldexp(powers_of_two[quotient.rem]*p,7*quotient.quot));
}
/*
  Expand an sRGB-encoded pixel to linear light: the power segment above the
  breakpoint, the linear segment (v/12.92) below it.
*/
MagickExport MagickRealType DecodePixelGamma(const MagickRealType pixel)
{
  if (pixel > (0.0404482362771076*QuantumRange))
    return((MagickRealType) (QuantumRange*DecodeGamma((double) (QuantumScale*
      pixel+0.055)/1.055)));
  return(pixel/12.92f);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C h a n n e l M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelChannelMap() deallocates memory associated with the pixel
% channel map.
%
% The format of the DestroyPixelChannelMap() method is:
%
% PixelChannelMap *DestroyPixelChannelMap(PixelChannelMap *channel_map)
%
% A description of each parameter follows:
%
% o channel_map: the pixel component map.
%
*/
/*
  Deallocate the pixel channel map and return NULL so callers can reset
  their pointer in one statement (channel_map=DestroyPixelChannelMap(...)).

  Fix: the previous code called RelinquishMagickMemory() twice — the first
  call freed the map and returned NULL, making the second call a confusing
  no-op that read like a double free.  One call suffices; behavior is
  unchanged since RelinquishMagickMemory() on the already-NULL pointer was
  a no-op returning NULL.
*/
MagickExport PixelChannelMap *DestroyPixelChannelMap(
  PixelChannelMap *channel_map)
{
  assert(channel_map != (PixelChannelMap *) NULL);
  return((PixelChannelMap *) RelinquishMagickMemory(channel_map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E n c o d e P i x e l G a m m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodePixelGamma() cancels any nonlinearity in the pixel.
%
% The format of the EncodePixelGamma method is:
%
%      MagickRealType EncodePixelGamma(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
/*
  Evaluate pow(x,5./12.) (i.e. x^(1/2.4)) via a Chebyshev polynomial
  expansion, avoiding pow().  x is split by frexp(); the series handles the
  mantissa and the powers_of_two table handles the exponent (reduced mod 12).
*/
static inline double EncodeGamma(const double x)
{
  div_t
    quotient;

  double
    p,
    term[9];

  int
    exponent;

  static const double coefficient[] =  /* Chebyshev poly: x^(5/12), x=1.5 */
  {
    1.1758200232996901923,
    0.16665763094889061230,
    -0.0083154894939042125035,
    0.00075187976780420279038,
    -0.000083240178519391795367,
    0.000010229209410070008679,
    -1.3400466409860246e-06,
    1.8333422241635376682e-07,
    -2.5878596761348859722e-08
  };

  static const double powers_of_two[] =  /* (2^N)^(5/12) */
  {
    1.0,
    1.3348398541700343678,
    1.7817974362806785482,
    2.3784142300054420538,
    3.1748021039363991669,
    4.2378523774371812394,
    5.6568542494923805819,
    7.5509945014535482244,
    1.0079368399158985525e1,
    1.3454342644059433809e1,
    1.7959392772949968275e1,
    2.3972913230026907883e1
  };

  /*
    Compute x^(1/2.4) == x^(5/12) == pow(x,1.0/2.4).
  */
  /* Map the frexp() mantissa [0.5,1) onto [-1,1) for the Chebyshev basis. */
  term[0]=1.0;
  term[1]=4.0*frexp(x,&exponent)-3.0;
  /* Chebyshev recurrence: T[n+1] = 2*t*T[n]-T[n-1]. */
  term[2]=2.0*term[1]*term[1]-term[0];
  term[3]=2.0*term[1]*term[2]-term[1];
  term[4]=2.0*term[1]*term[3]-term[2];
  term[5]=2.0*term[1]*term[4]-term[3];
  term[6]=2.0*term[1]*term[5]-term[4];
  term[7]=2.0*term[1]*term[6]-term[5];
  term[8]=2.0*term[1]*term[7]-term[6];
  p=coefficient[0]*term[0]+coefficient[1]*term[1]+coefficient[2]*term[2]+
    coefficient[3]*term[3]+coefficient[4]*term[4]+coefficient[5]*term[5]+
    coefficient[6]*term[6]+coefficient[7]*term[7]+coefficient[8]*term[8];
  /* Reduce the binary exponent mod 12 (floor semantics for negatives). */
  quotient=div(exponent-1,12);
  if (quotient.rem < 0)
    {
      quotient.quot-=1;
      quotient.rem+=12;
    }
  return(ldexp(powers_of_two[quotient.rem]*p,5*quotient.quot));
}
/*
  Compress a linear-light pixel to the sRGB transfer curve: the power
  segment above the breakpoint, the linear segment (12.92*v) below it.
*/
MagickExport MagickRealType EncodePixelGamma(const MagickRealType pixel)
{
  if (pixel > (0.0031306684425005883*QuantumRange))
    return((MagickRealType) QuantumRange*(1.055*EncodeGamma((double)
      QuantumScale*pixel)-0.055));
  return(12.92f*pixel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x p o r t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExportImagePixels() extracts pixel data from an image and returns it to you.
% The method returns MagickTrue on success otherwise MagickFalse if an error is
% encountered. The data is returned as char, short int, Quantum, unsigned int,
% unsigned long long, float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% ExportImagePixels(image,0,0,640,1,"RGB",CharPixel,pixels,exception);
%
% The format of the ExportImagePixels method is:
%
% MagickBooleanType ExportImagePixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height,
% const char *map,const StorageType type,void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,width,height: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o type: Define the data type of the pixels. Float and double types are
% normalized to [0..1] otherwise [0..QuantumRange]. Choose from these
% types: CharPixel (char *), DoublePixel (double *), FloatPixel (float *),
% LongPixel (unsigned int *), LongLongPixel (unsigned long long *),
% QuantumPixel (Quantum *), or ShortPixel (unsigned short *).
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Export the region 'roi' of 'image' as unsigned 8-bit samples into
  'pixels', one sample per character of 'map'.  Common channel orders take
  a fast path; anything else falls through to the generic loop driven by
  'quantum_map'.  Returns MagickFalse if any row could not be read.
*/
static MagickBooleanType ExportCharPixel(const Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;  /* source row, advanced by GetPixelChannels() */

  ssize_t
    x;

  unsigned char
    *magick_restrict q;  /* destination sample stream */

  size_t
    length;

  ssize_t
    y;

  q=(unsigned char *) pixels;
  /* Fast path: blue-green-red. */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;  /* row unavailable: reported as MagickFalse below */
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: blue-green-red-alpha. */
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar(GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: blue-green-red plus a zero pad byte. */
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar((Quantum) 0);
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: grayscale intensity. */
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(image,p)));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue. */
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue-alpha. */
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar(GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue plus a zero pad byte. */
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar((Quantum) 0);
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Generic path: one sample per map character, decoded via quantum_map. */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        *q=0;  /* pad/unknown channels export as zero */
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            *q=ScaleQuantumToChar(GetPixelRed(image,p));
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            *q=ScaleQuantumToChar(GetPixelGreen(image,p));
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            *q=ScaleQuantumToChar(GetPixelBlue(image,p));
            break;
          }
          case AlphaQuantum:
          {
            *q=ScaleQuantumToChar(GetPixelAlpha(image,p));
            break;
          }
          case OpacityQuantum:
          {
            *q=ScaleQuantumToChar(GetPixelAlpha(image,p));
            break;
          }
          case BlackQuantum:
          {
            /* Black channel is only meaningful for CMYK images. */
            if (image->colorspace == CMYKColorspace)
              *q=ScaleQuantumToChar(GetPixelBlack(image,p));
            break;
          }
          case IndexQuantum:
          {
            *q=ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(image,p)));
            break;
          }
          default:
            break;
        }
        q++;
      }
      p+=GetPixelChannels(image);
    }
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Export the region 'roi' of 'image' as doubles normalized to [0..1] into
  'pixels', one sample per character of 'map'.  Common channel orders take
  a fast path; anything else falls through to the generic loop driven by
  'quantum_map'.  Returns MagickFalse if any row could not be read.
*/
static MagickBooleanType ExportDoublePixel(const Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;  /* source row, advanced by GetPixelChannels() */

  double
    *magick_restrict q;  /* destination sample stream */

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  q=(double *) pixels;
  /* Fast path: blue-green-red. */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;  /* row unavailable: reported as MagickFalse below */
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: blue-green-red-alpha. */
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=(double) (QuantumScale*GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: blue-green-red plus a zero pad sample. */
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=0.0;
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: grayscale intensity. */
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelIntensity(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue. */
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue-alpha. */
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=(double) (QuantumScale*GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue plus a zero pad sample. */
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=0.0;
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Generic path: one sample per map character, decoded via quantum_map. */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        *q=0;  /* pad/unknown channels export as zero */
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            *q=(double) (QuantumScale*GetPixelRed(image,p));
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            *q=(double) (QuantumScale*GetPixelGreen(image,p));
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            *q=(double) (QuantumScale*GetPixelBlue(image,p));
            break;
          }
          case AlphaQuantum:
          {
            *q=(double) (QuantumScale*GetPixelAlpha(image,p));
            break;
          }
          case OpacityQuantum:
          {
            *q=(double) (QuantumScale*GetPixelAlpha(image,p));
            break;
          }
          case BlackQuantum:
          {
            /* Black channel is only meaningful for CMYK images. */
            if (image->colorspace == CMYKColorspace)
              *q=(double) (QuantumScale*
                GetPixelBlack(image,p));
            break;
          }
          case IndexQuantum:
          {
            *q=(double) (QuantumScale*GetPixelIntensity(image,p));
            break;
          }
          default:
            *q=0;
        }
        q++;
      }
      p+=GetPixelChannels(image);
    }
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Export the region 'roi' of 'image' as floats normalized to [0..1] into
  'pixels', one sample per character of 'map'.  Common channel orders take
  a fast path; anything else falls through to the generic loop driven by
  'quantum_map'.  Returns MagickFalse if any row could not be read.

  Cleanup: the AlphaQuantum case previously wrapped GetPixelAlpha() in a
  redundant (Quantum) cast and the BlackQuantum case had stray spacing;
  both now match the sibling ExportDoublePixel()/ExportCharPixel() exactly
  (no behavior change).
*/
static MagickBooleanType ExportFloatPixel(const Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;  /* source row, advanced by GetPixelChannels() */

  float
    *magick_restrict q;  /* destination sample stream */

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  q=(float *) pixels;
  /* Fast path: blue-green-red. */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;  /* row unavailable: reported as MagickFalse below */
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: blue-green-red-alpha. */
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=(float) (QuantumScale*GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: blue-green-red plus a zero pad sample. */
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=0.0;
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: grayscale intensity. */
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelIntensity(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue. */
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue-alpha. */
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=(float) (QuantumScale*GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue plus a zero pad sample. */
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=0.0;
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Generic path: one sample per map character, decoded via quantum_map. */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        *q=0;  /* pad/unknown channels export as zero */
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            *q=(float) (QuantumScale*GetPixelRed(image,p));
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            *q=(float) (QuantumScale*GetPixelGreen(image,p));
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            *q=(float) (QuantumScale*GetPixelBlue(image,p));
            break;
          }
          case AlphaQuantum:
          {
            *q=(float) (QuantumScale*GetPixelAlpha(image,p));
            break;
          }
          case OpacityQuantum:
          {
            *q=(float) (QuantumScale*GetPixelAlpha(image,p));
            break;
          }
          case BlackQuantum:
          {
            /* Black channel is only meaningful for CMYK images. */
            if (image->colorspace == CMYKColorspace)
              *q=(float) (QuantumScale*GetPixelBlack(image,p));
            break;
          }
          case IndexQuantum:
          {
            *q=(float) (QuantumScale*GetPixelIntensity(image,p));
            break;
          }
          default:
            *q=0;
        }
        q++;
      }
      p+=GetPixelChannels(image);
    }
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Export the region 'roi' of 'image' as unsigned 32-bit samples into
  'pixels', one sample per character of 'map'.  Common channel orders take
  a fast path; anything else falls through to the generic loop driven by
  'quantum_map'.  Returns MagickFalse if any row could not be read.
*/
static MagickBooleanType ExportLongPixel(const Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;  /* source row, advanced by GetPixelChannels() */

  ssize_t
    x;

  unsigned int
    *magick_restrict q;  /* destination sample stream */

  size_t
    length;

  ssize_t
    y;

  q=(unsigned int *) pixels;
  /* Fast path: blue-green-red. */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;  /* row unavailable: reported as MagickFalse below */
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: blue-green-red-alpha. */
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=ScaleQuantumToLong(GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: blue-green-red plus a zero pad sample. */
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=0;
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: grayscale intensity. */
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(ClampToQuantum(GetPixelIntensity(image,p)));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue. */
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue-alpha. */
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=ScaleQuantumToLong(GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Fast path: red-green-blue plus a zero pad sample. */
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=0;
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /* Generic path: one sample per map character, decoded via quantum_map. */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        *q=0;  /* pad/unknown channels export as zero */
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            *q=ScaleQuantumToLong(GetPixelRed(image,p));
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            *q=ScaleQuantumToLong(GetPixelGreen(image,p));
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            *q=ScaleQuantumToLong(GetPixelBlue(image,p));
            break;
          }
          case AlphaQuantum:
          {
            *q=ScaleQuantumToLong(GetPixelAlpha(image,p));
            break;
          }
          case OpacityQuantum:
          {
            *q=ScaleQuantumToLong(GetPixelAlpha(image,p));
            break;
          }
          case BlackQuantum:
          {
            /* Black channel is only meaningful for CMYK images. */
            if (image->colorspace == CMYKColorspace)
              *q=ScaleQuantumToLong(GetPixelBlack(image,p));
            break;
          }
          case IndexQuantum:
          {
            *q=ScaleQuantumToLong(ClampToQuantum(GetPixelIntensity(image,p)));
            break;
          }
          default:
            break;
        }
        q++;
      }
      p+=GetPixelChannels(image);
    }
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportLongLongPixel() copies pixels from the region `roi` of `image` into
  the caller-supplied buffer `pixels`, emitting one 64-bit unsigned integer
  (MagickSizeType) per channel in the channel order spelled out by `map`.
  Common layouts (BGR, BGRA, BGRP, I, RGB, RGBA, RGBP) take dedicated fast
  paths; any other map falls through to a generic per-character loop driven
  by the pre-parsed `quantum_map`.  Returns MagickTrue on success, or
  MagickFalse when GetVirtualPixels() fails before the last row is done.
  The caller must have sized `pixels` to roi->width*roi->height*strlen(map)
  elements.
*/
static MagickBooleanType ExportLongLongPixel(const Image *image,
const RectangleInfo *roi,const char *magick_restrict map,
const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
const Quantum
*magick_restrict p;
ssize_t
x;
MagickSizeType
*magick_restrict q;
size_t
length;
ssize_t
y;
q=(MagickSizeType *) pixels;
/* Fast path: interleaved blue,green,red. */
if (LocaleCompare(map,"BGR") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
*q++=ScaleQuantumToLongLong(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
*q++=0;  /* 'P' is a pad channel: always written as zero */
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/* Fast path: single-channel grayscale intensity. */
if (LocaleCompare(map,"I") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(ClampToQuantum(
GetPixelIntensity(image,p)));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGB") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
*q++=ScaleQuantumToLongLong(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
*q++=0;  /* 'P' is a pad channel: always written as zero */
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Generic path: one output value per map character, dispatched through the
  quantum_map entries parsed by ExportImagePixels().
*/
length=strlen(map);
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) length; i++)
{
*q=0;  /* default for unhandled map characters (e.g. pad) */
switch (quantum_map[i])
{
case RedQuantum:
case CyanQuantum:
{
*q=ScaleQuantumToLongLong(GetPixelRed(image,p));
break;
}
case GreenQuantum:
case MagentaQuantum:
{
*q=ScaleQuantumToLongLong(GetPixelGreen(image,p));
break;
}
case BlueQuantum:
case YellowQuantum:
{
*q=ScaleQuantumToLongLong(GetPixelBlue(image,p));
break;
}
case AlphaQuantum:
{
*q=ScaleQuantumToLongLong(GetPixelAlpha(image,p));
break;
}
case OpacityQuantum:
{
*q=ScaleQuantumToLongLong(GetPixelAlpha(image,p));
break;
}
case BlackQuantum:
{
/* Black channel is only meaningful for CMYK images. */
if (image->colorspace == CMYKColorspace)
*q=ScaleQuantumToLongLong(GetPixelBlack(image,p));
break;
}
case IndexQuantum:
{
*q=ScaleQuantumToLongLong(ClampToQuantum(
GetPixelIntensity(image,p)));
break;
}
default:
break;
}
q++;
}
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportQuantumPixel() copies pixels from the region `roi` of `image` into
  the caller-supplied buffer `pixels` as raw Quantum values (no scaling),
  one value per channel in the channel order spelled out by `map`.  Common
  layouts (BGR, BGRA, BGRP, I, RGB, RGBA, RGBP) take dedicated fast paths;
  any other map falls through to a generic per-character loop driven by the
  pre-parsed `quantum_map`.  Returns MagickTrue on success, or MagickFalse
  when GetVirtualPixels() fails before the last row is done.
*/
static MagickBooleanType ExportQuantumPixel(const Image *image,
const RectangleInfo *roi,const char *magick_restrict map,
const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
size_t
length;
ssize_t
y;
q=(Quantum *) pixels;
if (LocaleCompare(map,"BGR") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelBlue(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelRed(image,p);
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelBlue(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelRed(image,p);
*q++=(Quantum) (GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelBlue(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelRed(image,p);
*q++=(Quantum) 0;  /* 'P' is a pad channel: always written as zero */
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/* Fast path: single-channel grayscale intensity. */
if (LocaleCompare(map,"I") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ClampToQuantum(GetPixelIntensity(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGB") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelRed(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelBlue(image,p);
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelRed(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelBlue(image,p);
*q++=(Quantum) (GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelRed(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelBlue(image,p);
*q++=(Quantum) 0;  /* 'P' is a pad channel: always written as zero */
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Generic path: one output Quantum per map character, dispatched through
  the quantum_map entries parsed by ExportImagePixels().
*/
length=strlen(map);
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) length; i++)
{
*q=(Quantum) 0;  /* default for unhandled map characters (e.g. pad) */
switch (quantum_map[i])
{
case RedQuantum:
case CyanQuantum:
{
*q=GetPixelRed(image,p);
break;
}
case GreenQuantum:
case MagentaQuantum:
{
*q=GetPixelGreen(image,p);
break;
}
case BlueQuantum:
case YellowQuantum:
{
*q=GetPixelBlue(image,p);
break;
}
case AlphaQuantum:
{
*q=GetPixelAlpha(image,p);
break;
}
case OpacityQuantum:
{
*q=GetPixelAlpha(image,p);
break;
}
case BlackQuantum:
{
/* Black channel is only meaningful for CMYK images. */
if (image->colorspace == CMYKColorspace)
*q=GetPixelBlack(image,p);
break;
}
case IndexQuantum:
{
*q=ClampToQuantum(GetPixelIntensity(image,p));
break;
}
default:
{
*q=(Quantum) 0;
break;
}
}
q++;
}
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportShortPixel() copies pixels from the region `roi` of `image` into
  the caller-supplied buffer `pixels` as unsigned shorts, one value per
  channel in the channel order spelled out by `map`.  Common layouts (BGR,
  BGRA, BGRP, I, RGB, RGBA, RGBP) take dedicated fast paths; any other map
  falls through to a generic per-character loop driven by the pre-parsed
  `quantum_map`.  Returns MagickTrue on success, or MagickFalse when
  GetVirtualPixels() fails before the last row is done.
*/
static MagickBooleanType ExportShortPixel(const Image *image,
const RectangleInfo *roi,const char *magick_restrict map,
const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
const Quantum
*magick_restrict p;
ssize_t
x;
unsigned short
*magick_restrict q;
size_t
length;
ssize_t
y;
q=(unsigned short *) pixels;
if (LocaleCompare(map,"BGR") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
*q++=ScaleQuantumToShort(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
*q++=0;  /* 'P' is a pad channel: always written as zero */
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/* Fast path: single-channel grayscale intensity. */
if (LocaleCompare(map,"I") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(ClampToQuantum(GetPixelIntensity(image,p)));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGB") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
*q++=ScaleQuantumToShort(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
*q++=0;  /* 'P' is a pad channel: always written as zero */
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Generic path: one output value per map character, dispatched through the
  quantum_map entries parsed by ExportImagePixels().
*/
length=strlen(map);
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) length; i++)
{
*q=0;  /* default for unhandled map characters (e.g. pad) */
switch (quantum_map[i])
{
case RedQuantum:
case CyanQuantum:
{
*q=ScaleQuantumToShort(GetPixelRed(image,p));
break;
}
case GreenQuantum:
case MagentaQuantum:
{
*q=ScaleQuantumToShort(GetPixelGreen(image,p));
break;
}
case BlueQuantum:
case YellowQuantum:
{
*q=ScaleQuantumToShort(GetPixelBlue(image,p));
break;
}
case AlphaQuantum:
{
*q=ScaleQuantumToShort(GetPixelAlpha(image,p));
break;
}
case OpacityQuantum:
{
*q=ScaleQuantumToShort(GetPixelAlpha(image,p));
break;
}
case BlackQuantum:
{
/* Black channel is only meaningful for CMYK images. */
if (image->colorspace == CMYKColorspace)
*q=ScaleQuantumToShort(GetPixelBlack(image,p));
break;
}
case IndexQuantum:
{
*q=ScaleQuantumToShort(ClampToQuantum(GetPixelIntensity(image,p)));
break;
}
default:
break;
}
q++;
}
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportImagePixels() extracts pixel data from the image region
  (x,y,width,height) into `pixels` in the channel order given by `map`
  (e.g. "RGB", "BGRA", "CMYK", "I") and the storage type given by `type`.
  It validates the map — C/M/Y/K require a CMYK-colorspace image, any
  unknown character is an error — builds a QuantumType lookup table, then
  dispatches to the storage-type-specific worker (ExportCharPixel, ...,
  ExportShortPixel).  Returns MagickTrue on success; on failure it raises
  an exception and returns MagickFalse.  The quantum_map allocation is
  released on every path before returning.
*/
MagickExport MagickBooleanType ExportImagePixels(const Image *image,
const ssize_t x,const ssize_t y,const size_t width,const size_t height,
const char *map,const StorageType type,void *pixels,ExceptionInfo *exception)
{
MagickBooleanType
status;
QuantumType
*quantum_map;
RectangleInfo
roi;
ssize_t
i;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
  Translate each map character into a QuantumType; CMYK-only channels are
  rejected up front for non-CMYK images.
*/
length=strlen(map);
quantum_map=(QuantumType *) AcquireQuantumMemory(length,sizeof(*quantum_map));
if (quantum_map == (QuantumType *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
for (i=0; i < (ssize_t) length; i++)
{
switch (map[i])
{
case 'A':
case 'a':
{
quantum_map[i]=AlphaQuantum;
break;
}
case 'B':
case 'b':
{
quantum_map[i]=BlueQuantum;
break;
}
case 'C':
case 'c':
{
quantum_map[i]=CyanQuantum;
if (image->colorspace == CMYKColorspace)
break;
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",map);
return(MagickFalse);
}
case 'g':
case 'G':
{
quantum_map[i]=GreenQuantum;
break;
}
case 'I':
case 'i':
{
quantum_map[i]=IndexQuantum;
break;
}
case 'K':
case 'k':
{
quantum_map[i]=BlackQuantum;
if (image->colorspace == CMYKColorspace)
break;
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",map);
return(MagickFalse);
}
case 'M':
case 'm':
{
quantum_map[i]=MagentaQuantum;
if (image->colorspace == CMYKColorspace)
break;
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",map);
return(MagickFalse);
}
case 'o':
case 'O':
{
quantum_map[i]=OpacityQuantum;
break;
}
case 'P':
case 'p':
{
quantum_map[i]=UndefinedQuantum;  /* pad: exported as zero */
break;
}
case 'R':
case 'r':
{
quantum_map[i]=RedQuantum;
break;
}
case 'Y':
case 'y':
{
quantum_map[i]=YellowQuantum;
if (image->colorspace == CMYKColorspace)
break;
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",map);
return(MagickFalse);
}
default:
{
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnrecognizedPixelMap","`%s'",map);
return(MagickFalse);
}
}
}
/*
  Dispatch on the requested storage type.
*/
roi.width=width;
roi.height=height;
roi.x=x;
roi.y=y;
switch (type)
{
case CharPixel:
{
status=ExportCharPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case DoublePixel:
{
status=ExportDoublePixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case FloatPixel:
{
status=ExportFloatPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case LongPixel:
{
status=ExportLongPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case LongLongPixel:
{
status=ExportLongLongPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case QuantumPixel:
{
status=ExportQuantumPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case ShortPixel:
{
status=ExportShortPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
default:
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnrecognizedPixelMap","`%s'",map);
status=MagickFalse;
}
}
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelInfo() initializes the PixelInfo structure.
%
% The format of the GetPixelInfo method is:
%
% GetPixelInfo(const Image *image,PixelInfo *pixel)
%
% A description of each parameter follows:
%
% o image: the image. (optional - may be NULL)
%
% o pixel: Specifies a pointer to a PixelInfo structure.
%
*/
/*
  GetPixelInfo() initializes `pixel` to opaque sRGB DirectClass defaults at
  the build's quantum depth; when `image` is non-NULL, the storage class,
  colorspace, alpha trait, depth, and fuzz are inherited from the image
  instead of the defaults.
*/
MagickExport void GetPixelInfo(const Image *image,PixelInfo *pixel)
{
(void) memset(pixel,0,sizeof(*pixel));
pixel->storage_class=DirectClass;
pixel->colorspace=sRGBColorspace;
pixel->depth=MAGICKCORE_QUANTUM_DEPTH;
pixel->alpha_trait=UndefinedPixelTrait;
pixel->alpha=(double) OpaqueAlpha;
if (image != (const Image *) NULL)
{
pixel->storage_class=image->storage_class;
pixel->colorspace=image->colorspace;
pixel->alpha_trait=image->alpha_trait;
pixel->depth=image->depth;
pixel->fuzz=image->fuzz;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l I n f o I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelInfoIntensity() returns a single sample intensity value from the red,
% green, and blue components of a pixel based on the selected method:
%
% Rec601Luma 0.298839R' + 0.586811G' + 0.114350B'
% Rec601Luminance 0.298839R + 0.586811G + 0.114350B
% Rec709Luma 0.212656R' + 0.715158G' + 0.072186B'
% Rec709Luminance 0.212656R + 0.715158G + 0.072186B
% Brightness max(R', G', B')
% Lightness (min(R', G', B') + max(R', G', B')) / 2.0
%
% MS (R^2 + G^2 + B^2) / 3.0
% RMS sqrt((R^2 + G^2 + B^2) / 3.0)
% Average (R + G + B) / 3.0
%
% The format of the GetPixelInfoIntensity method is:
%
% MagickRealType GetPixelInfoIntensity(const Image *image,
% const Quantum *pixel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pixel: Specifies a pointer to a Quantum structure.
%
*/
/*
  GetPixelInfoIntensity() reduces the red/green/blue components of `pixel`
  to a single intensity sample using the image's configured
  PixelIntensityMethod (Rec709 luma when `image` is NULL).  The Luma
  methods first encode linear-RGB pixels to the nonlinear domain; the
  Luminance methods first decode sRGB pixels to linear light, so the
  weighted sums below always operate in the domain the method defines.
*/
MagickExport MagickRealType GetPixelInfoIntensity(
const Image *magick_restrict image,const PixelInfo *magick_restrict pixel)
{
MagickRealType
blue,
green,
red,
intensity;
PixelIntensityMethod
method;
method=Rec709LumaPixelIntensityMethod;  /* default when no image is given */
if (image != (const Image *) NULL)
method=image->intensity;
red=pixel->red;
green=pixel->green;
blue=pixel->blue;
switch (method)
{
case AveragePixelIntensityMethod:
{
intensity=(red+green+blue)/3.0;
break;
}
case BrightnessPixelIntensityMethod:
{
intensity=MagickMax(MagickMax(red,green),blue);
break;
}
case LightnessPixelIntensityMethod:
{
intensity=(MagickMin(MagickMin(red,green),blue)+
MagickMax(MagickMax(red,green),blue))/2.0;
break;
}
case MSPixelIntensityMethod:
{
/* Mean square, normalized back into quantum range. */
intensity=(MagickRealType) (((double) red*red+green*green+blue*blue)/
(3.0*QuantumRange));
break;
}
case Rec601LumaPixelIntensityMethod:
{
if (pixel->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec601LuminancePixelIntensityMethod:
{
if (pixel->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec709LumaPixelIntensityMethod:
default:  /* unknown methods fall back to Rec709 luma */
{
if (pixel->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case Rec709LuminancePixelIntensityMethod:
{
if (pixel->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case RMSPixelIntensityMethod:
{
/* sqrt(sum)/sqrt(3) == sqrt(mean square). */
intensity=(MagickRealType) (sqrt((double) red*red+green*green+blue*blue)/
sqrt(3.0));
break;
}
}
return(intensity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelIntensity() returns a single sample intensity value from the red,
% green, and blue components of a pixel based on the selected method:
%
% Rec601Luma 0.298839R' + 0.586811G' + 0.114350B'
% Rec601Luminance 0.298839R + 0.586811G + 0.114350B
% Rec709Luma 0.212656R' + 0.715158G' + 0.072186B'
% Rec709Luminance 0.212656R + 0.715158G + 0.072186B
% Brightness max(R', G', B')
% Lightness (min(R', G', B') + max(R', G', B')) / 2.0
%
% MS (R^2 + G^2 + B^2) / 3.0
% RMS sqrt((R^2 + G^2 + B^2) / 3.0)
% Average (R + G + B) / 3.0
%
% The format of the GetPixelIntensity method is:
%
% MagickRealType GetPixelIntensity(const Image *image,
% const Quantum *pixel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pixel: Specifies a pointer to a Quantum structure.
%
*/
/*
  GetPixelIntensity() reduces the red/green/blue components of the raw
  pixel `pixel` to a single intensity sample using image->intensity.
  Single-channel images short-circuit and return the red (gray) channel.
  The Luma methods first encode linear-RGB/linear-gray pixels to the
  nonlinear domain; the Luminance methods first decode sRGB/gray pixels to
  linear light, so the weighted sums below always operate in the domain
  the method defines.
*/
MagickExport MagickRealType GetPixelIntensity(
const Image *magick_restrict image,const Quantum *magick_restrict pixel)
{
MagickRealType
blue,
green,
red,
intensity;
red=(MagickRealType) GetPixelRed(image,pixel);
if (image->number_channels == 1)
return(red);  /* grayscale: the sole channel is already the intensity */
green=(MagickRealType) GetPixelGreen(image,pixel);
blue=(MagickRealType) GetPixelBlue(image,pixel);
switch (image->intensity)
{
case AveragePixelIntensityMethod:
{
intensity=(red+green+blue)/3.0;
break;
}
case BrightnessPixelIntensityMethod:
{
intensity=MagickMax(MagickMax(red,green),blue);
break;
}
case LightnessPixelIntensityMethod:
{
intensity=(MagickMin(MagickMin(red,green),blue)+
MagickMax(MagickMax(red,green),blue))/2.0;
break;
}
case MSPixelIntensityMethod:
{
/* Mean square, normalized back into quantum range. */
intensity=(MagickRealType) (((double) red*red+green*green+blue*blue)/
(3.0*QuantumRange));
break;
}
case Rec601LumaPixelIntensityMethod:
{
if ((image->colorspace == RGBColorspace) ||
(image->colorspace == LinearGRAYColorspace))
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec601LuminancePixelIntensityMethod:
{
if ((image->colorspace == sRGBColorspace) ||
(image->colorspace == GRAYColorspace))
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec709LumaPixelIntensityMethod:
default:  /* unknown methods fall back to Rec709 luma */
{
if ((image->colorspace == RGBColorspace) ||
(image->colorspace == LinearGRAYColorspace))
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case Rec709LuminancePixelIntensityMethod:
{
if ((image->colorspace == sRGBColorspace) ||
(image->colorspace == GRAYColorspace))
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case RMSPixelIntensityMethod:
{
/* sqrt(sum)/sqrt(3) == sqrt(mean square). */
intensity=(MagickRealType) (sqrt((double) red*red+green*green+blue*blue)/
sqrt(3.0));
break;
}
}
return(intensity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p o r t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImportImagePixels() accepts pixel data and stores in the image at the
% location you specify. The method returns MagickTrue on success otherwise
% MagickFalse if an error is encountered. The pixel data can be either char,
% Quantum, short int, unsigned int, unsigned long long, float, or double in
% the order specified by map.
%
% Suppose you want to upload the first scanline of a 640x480 image from
% character data in red-green-blue order:
%
% ImportImagePixels(image,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the ImportImagePixels method is:
%
% MagickBooleanType ImportImagePixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height,
% const char *map,const StorageType type,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,width,height: These values define the perimeter
% of a region of pixels you want to define.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o type: Define the data type of the pixels. Float and double types are
% normalized to [0..1] otherwise [0..QuantumRange]. Choose from these
% types: CharPixel (char *), DoublePixel (double *), FloatPixel (float *),
% LongPixel (unsigned int *), LongLongPixel (unsigned long long *),
% QuantumPixel (Quantum *), or ShortPixel (unsigned short *).
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ImportCharPixel() stores caller-supplied unsigned-char pixel data into
  the region `roi` of `image`, reading one byte per channel in the channel
  order spelled out by `map` and scaling each byte up to quantum range.
  Common layouts (BGR, BGRA, BGRO, BGRP, I, RGB, RGBA, RGBO, RGBP) take
  dedicated fast paths; any other map falls through to a generic
  per-character loop driven by the pre-parsed `quantum_map`.  Each row is
  committed with SyncAuthenticPixels().  Returns MagickTrue on success, or
  MagickFalse when pixel access or sync fails before the last row is done.
*/
static MagickBooleanType ImportCharPixel(Image *image,const RectangleInfo *roi,
const char *magick_restrict map,const QuantumType *quantum_map,
const void *pixels,ExceptionInfo *exception)
{
const unsigned char
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
size_t
length;
ssize_t
y;
p=(const unsigned char *) pixels;
if (LocaleCompare(map,"BGR") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/* 'O' (opacity) is stored into the alpha channel like 'A'. */
if (LocaleCompare(map,"BGRO") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
p++;  /* 'P' pad byte: consumed but ignored */
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/* Fast path: single-channel grayscale input. */
if (LocaleCompare(map,"I") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelGray(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGB") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/* 'O' (opacity) is stored into the alpha channel like 'A'. */
if (LocaleCompare(map,"RGBO") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
p++;  /* 'P' pad byte: consumed but ignored */
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Generic path: one input byte per map character, dispatched through the
  quantum_map entries parsed by ImportImagePixels().
*/
length=strlen(map);
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) length; i++)
{
switch (quantum_map[i])
{
case RedQuantum:
case CyanQuantum:
{
SetPixelRed(image,ScaleCharToQuantum(*p),q);
break;
}
case GreenQuantum:
case MagentaQuantum:
{
SetPixelGreen(image,ScaleCharToQuantum(*p),q);
break;
}
case BlueQuantum:
case YellowQuantum:
{
SetPixelBlue(image,ScaleCharToQuantum(*p),q);
break;
}
case AlphaQuantum:
{
SetPixelAlpha(image,ScaleCharToQuantum(*p),q);
break;
}
case OpacityQuantum:
{
SetPixelAlpha(image,ScaleCharToQuantum(*p),q);
break;
}
case BlackQuantum:
{
SetPixelBlack(image,ScaleCharToQuantum(*p),q);
break;
}
case IndexQuantum:
{
SetPixelGray(image,ScaleCharToQuantum(*p),q);
break;
}
default:
break;  /* pad/unknown: byte is skipped below */
}
p++;
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportDoublePixel() imports a rectangular region of pixel data from a
  caller-supplied buffer of doubles into the image's authentic pixel cache.
  Each source sample is one channel value, nominally in [0.0,1.0]; it is
  scaled by QuantumRange and clamped to the Quantum range.  Channel order is
  given by `map`: the common orderings BGR, BGRA, BGRP, I, RGB, RGBA and
  RGBP are unrolled as fast paths, and any other map falls through to a
  generic per-character dispatch driven by `quantum_map`.  A 'P' (pad)
  sample is consumed but its value is discarded.  Returns MagickTrue if
  every row was imported and synced, MagickFalse if the pixel cache could
  not be acquired or synced for some row.
*/
static MagickBooleanType ImportDoublePixel(Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,const void *pixels,ExceptionInfo *exception)
{
  const double
    *magick_restrict p;  /* walks the caller's sample buffer */
  Quantum
    *magick_restrict q;  /* walks the destination cache row */
  ssize_t
    x;
  size_t
    length;
  ssize_t
    y;
  p=(const double *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      /* y < height means a row failed before the loop completed. */
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: dispatch each character of `map` (pre-decoded into
    quantum_map by the caller) for every pixel.  Unknown/pad channels hit
    the default case: the sample is still consumed by the trailing p++.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case OpacityQuantum:
          {
            /* Opacity is stored via SetPixelAlpha, identical to 'A'. */
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportFloatPixel() imports a rectangular region of pixel data from a
  caller-supplied buffer of floats into the image's authentic pixel cache.
  Each source sample is one channel value, nominally in [0.0,1.0]; it is
  scaled by QuantumRange and clamped to the Quantum range.  Channel order is
  given by `map`: BGR, BGRA, BGRP, I, RGB, RGBA and RGBP are unrolled as
  fast paths; any other map is handled by a generic per-character dispatch
  through `quantum_map`.  A 'P' (pad) sample is consumed but discarded.
  Returns MagickTrue if every row was imported and synced, MagickFalse if
  the pixel cache could not be acquired or synced for some row.
*/
static MagickBooleanType ImportFloatPixel(Image *image,const RectangleInfo *roi,
  const char *magick_restrict map,const QuantumType *quantum_map,
  const void *pixels,ExceptionInfo *exception)
{
  const float
    *magick_restrict p;  /* walks the caller's sample buffer */
  Quantum
    *magick_restrict q;  /* walks the destination cache row */
  ssize_t
    x;
  size_t
    length;
  ssize_t
    y;
  p=(const float *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      /* y < height means a row failed before the loop completed. */
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: dispatch each character of `map` for every pixel.
    Unknown/pad channels hit the default case; the sample is still
    consumed by the trailing p++.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case OpacityQuantum:
          {
            /* Opacity is stored via SetPixelAlpha, identical to 'A'. */
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportLongPixel() imports a rectangular region of pixel data from a
  caller-supplied buffer of 32-bit unsigned integers into the image's
  authentic pixel cache.  Each source sample is one channel value scaled
  down to the Quantum range via ScaleLongToQuantum().  Channel order is
  given by `map`: BGR, BGRA, BGRP, I, RGB, RGBA and RGBP are unrolled as
  fast paths; any other map is handled by a generic per-character dispatch
  through `quantum_map`.  A 'P' (pad) sample is consumed but discarded.
  Returns MagickTrue if every row was imported and synced, MagickFalse if
  the pixel cache could not be acquired or synced for some row.
*/
static MagickBooleanType ImportLongPixel(Image *image,const RectangleInfo *roi,
  const char *magick_restrict map,const QuantumType *quantum_map,
  const void *pixels,ExceptionInfo *exception)
{
  const unsigned int
    *magick_restrict p;  /* walks the caller's sample buffer */
  Quantum
    *magick_restrict q;  /* walks the destination cache row */
  ssize_t
    x;
  size_t
    length;
  ssize_t
    y;
  p=(const unsigned int *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      /* y < height means a row failed before the loop completed. */
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: dispatch each character of `map` for every pixel.
    Unknown/pad channels hit the default case; the sample is still
    consumed by the trailing p++.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case OpacityQuantum:
          {
            /* Opacity is stored via SetPixelAlpha, identical to 'A'. */
            SetPixelAlpha(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ScaleLongToQuantum(*p),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportLongLongPixel() imports a rectangular region of pixel data from a
  caller-supplied buffer of 64-bit unsigned integers (MagickSizeType) into
  the image's authentic pixel cache.  Each source sample is one channel
  value scaled down to the Quantum range via ScaleLongLongToQuantum().
  Channel order is given by `map`: BGR, BGRA, BGRP, I, RGB, RGBA and RGBP
  are unrolled as fast paths; any other map is handled by a generic
  per-character dispatch through `quantum_map`.  A 'P' (pad) sample is
  consumed but discarded.  Returns MagickTrue if every row was imported
  and synced, MagickFalse if the pixel cache could not be acquired or
  synced for some row.
*/
static MagickBooleanType ImportLongLongPixel(Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,const void *pixels,ExceptionInfo *exception)
{
  const MagickSizeType
    *magick_restrict p;  /* walks the caller's sample buffer */
  Quantum
    *magick_restrict q;  /* walks the destination cache row */
  ssize_t
    x;
  size_t
    length;
  ssize_t
    y;
  p=(const MagickSizeType *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      /* y < height means a row failed before the loop completed. */
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: dispatch each character of `map` for every pixel.
    Unknown/pad channels hit the default case; the sample is still
    consumed by the trailing p++.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case OpacityQuantum:
          {
            /* Opacity is stored via SetPixelAlpha, identical to 'A'. */
            SetPixelAlpha(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportQuantumPixel() imports a rectangular region of pixel data from a
  caller-supplied buffer of Quantum samples into the image's authentic
  pixel cache.  Samples are already in the native Quantum range, so they
  are stored directly with no scaling.  Channel order is given by `map`:
  BGR, BGRA, BGRP, I, RGB, RGBA and RGBP are unrolled as fast paths; any
  other map is handled by a generic per-character dispatch through
  `quantum_map`.  A 'P' (pad) sample is consumed but discarded.  Returns
  MagickTrue if every row was imported and synced, MagickFalse if the
  pixel cache could not be acquired or synced for some row.
*/
static MagickBooleanType ImportQuantumPixel(Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,const void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;  /* walks the caller's sample buffer */
  Quantum
    *magick_restrict q;  /* walks the destination cache row */
  ssize_t
    x;
  size_t
    length;
  ssize_t
    y;
  p=(const Quantum *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelRed(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      /* y < height means a row failed before the loop completed. */
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelRed(image,*p++,q);
          SetPixelAlpha(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelRed(image,*p++,q);
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelBlue(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelBlue(image,*p++,q);
          SetPixelAlpha(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelBlue(image,*p++,q);
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: dispatch each character of `map` for every pixel.
    Unknown/pad channels hit the default case; the sample is still
    consumed by the trailing p++.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,*p,q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,*p,q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,*p,q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,*p,q);
            break;
          }
          case OpacityQuantum:
          {
            /* Opacity is stored via SetPixelAlpha, identical to 'A'. */
            SetPixelAlpha(image,*p,q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,*p,q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,*p,q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportShortPixel() imports a rectangular region of pixel data from a
  caller-supplied buffer of 16-bit unsigned integers into the image's
  authentic pixel cache.  Each source sample is one channel value scaled
  to the Quantum range via ScaleShortToQuantum().  Channel order is given
  by `map`: BGR, BGRA, BGRP, I, RGB, RGBA and RGBP are unrolled as fast
  paths; any other map is handled by a generic per-character dispatch
  through `quantum_map`.  A 'P' (pad) sample is consumed but discarded.
  Returns MagickTrue if every row was imported and synced, MagickFalse if
  the pixel cache could not be acquired or synced for some row.
*/
static MagickBooleanType ImportShortPixel(Image *image,const RectangleInfo *roi,
  const char *magick_restrict map,const QuantumType *quantum_map,
  const void *pixels,ExceptionInfo *exception)
{
  const unsigned short
    *magick_restrict p;  /* walks the caller's sample buffer */
  Quantum
    *magick_restrict q;  /* walks the destination cache row */
  ssize_t
    x;
  size_t
    length;
  ssize_t
    y;
  p=(const unsigned short *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      /* y < height means a row failed before the loop completed. */
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          p++;  /* skip padding sample */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: dispatch each character of `map` for every pixel.
    Unknown/pad channels hit the default case; the sample is still
    consumed by the trailing p++.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case OpacityQuantum:
          {
            /* Opacity is stored via SetPixelAlpha, identical to 'A'. */
            SetPixelAlpha(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ScaleShortToQuantum(*p),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
MagickExport MagickBooleanType ImportImagePixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height,const char *map,
  const StorageType type,const void *pixels,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  QuantumType
    *quantum_map;
  RectangleInfo
    roi;
  ssize_t
    i;
  size_t
    length;
  /*
    Validate arguments, then translate the channel map string (e.g. "RGBA")
    into an array of QuantumType selectors, one entry per map character.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  length=strlen(map);
  quantum_map=(QuantumType *) AcquireQuantumMemory(length,sizeof(*quantum_map));
  if (quantum_map == (QuantumType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      {
        /* importing alpha implies the image now blends */
        quantum_map[i]=AlphaQuantum;
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'B':
      case 'b':
      {
        quantum_map[i]=BlueQuantum;
        break;
      }
      case 'C':
      case 'c':
      {
        /* C/M/Y/K components force the CMYK colorspace */
        quantum_map[i]=CyanQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      case 'g':
      case 'G':
      {
        quantum_map[i]=GreenQuantum;
        break;
      }
      case 'K':
      case 'k':
      {
        quantum_map[i]=BlackQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      case 'I':
      case 'i':
      {
        /* intensity (gray) component forces the GRAY colorspace */
        quantum_map[i]=IndexQuantum;
        (void) SetImageColorspace(image,GRAYColorspace,exception);
        break;
      }
      case 'm':
      case 'M':
      {
        quantum_map[i]=MagentaQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      case 'O':
      case 'o':
      {
        quantum_map[i]=OpacityQuantum;
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'P':
      case 'p':
      {
        /* pad component: the input value is consumed but not stored */
        quantum_map[i]=UndefinedQuantum;
        break;
      }
      case 'R':
      case 'r':
      {
        quantum_map[i]=RedQuantum;
        break;
      }
      case 'Y':
      case 'y':
      {
        quantum_map[i]=YellowQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      default:
      {
        /* unknown map letter: release the map and fail */
        quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedPixelMap","`%s'",map);
        return(MagickFalse);
      }
    }
  }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Transfer the pixels from the pixel data to the image, dispatching on the
    caller-declared storage type of the raw buffer.
  */
  roi.width=width;
  roi.height=height;
  roi.x=x;
  roi.y=y;
  switch (type)
  {
    case CharPixel:
    {
      status=ImportCharPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case DoublePixel:
    {
      status=ImportDoublePixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case FloatPixel:
    {
      status=ImportFloatPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case LongPixel:
    {
      status=ImportLongPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case LongLongPixel:
    {
      status=ImportLongLongPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case QuantumPixel:
    {
      status=ImportQuantumPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case ShortPixel:
    {
      status=ImportShortPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    default:
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnrecognizedStorageType","`%d'",type);
      status=MagickFalse;
    }
  }
  quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e P i x e l C h a n n e l M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializePixelChannelMap() defines the standard pixel component map.
%
% The format of the InitializePixelChannelMap() method is:
%
% void InitializePixelChannelMap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void InitializePixelChannelMap(Image *image)
{
  PixelTrait
    trait;
  ssize_t
    n;
  /*
    Assign each active pixel channel its trait and its offset 'n' within a
    pixel.  Offsets are handed out in a fixed order: color channels, black
    (CMYK only), alpha, colormap index, the mask channels, then any meta
    channels.  The statement order below therefore defines the in-memory
    channel layout -- do not reorder.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  (void) memset(image->channel_map,0,MaxPixelChannels*
    sizeof(*image->channel_map));
  trait=UpdatePixelTrait;
  if (image->alpha_trait != UndefinedPixelTrait)
    trait=(PixelTrait) (trait | BlendPixelTrait);
  n=0;
  if ((image->colorspace == LinearGRAYColorspace) ||
      (image->colorspace == GRAYColorspace))
    {
      /* grayscale: red, green and blue all alias the same offset */
      SetPixelChannelAttributes(image,BluePixelChannel,trait,n);
      SetPixelChannelAttributes(image,GreenPixelChannel,trait,n);
      SetPixelChannelAttributes(image,RedPixelChannel,trait,n++);
    }
  else
    {
      SetPixelChannelAttributes(image,RedPixelChannel,trait,n++);
      SetPixelChannelAttributes(image,GreenPixelChannel,trait,n++);
      SetPixelChannelAttributes(image,BluePixelChannel,trait,n++);
    }
  if (image->colorspace == CMYKColorspace)
    SetPixelChannelAttributes(image,BlackPixelChannel,trait,n++);
  if (image->alpha_trait != UndefinedPixelTrait)
    SetPixelChannelAttributes(image,AlphaPixelChannel,CopyPixelTrait,n++);
  if (image->storage_class == PseudoClass)
    SetPixelChannelAttributes(image,IndexPixelChannel,CopyPixelTrait,n++);
  if ((image->channels & ReadMaskChannel) != 0)
    SetPixelChannelAttributes(image,ReadMaskPixelChannel,CopyPixelTrait,n++);
  if ((image->channels & WriteMaskChannel) != 0)
    SetPixelChannelAttributes(image,WriteMaskPixelChannel,CopyPixelTrait,n++);
  if ((image->channels & CompositeMaskChannel) != 0)
    SetPixelChannelAttributes(image,CompositeMaskPixelChannel,CopyPixelTrait,
      n++);
  if (image->number_meta_channels > 0)
    {
      PixelChannel
        meta_channel;
      ssize_t
        i;
      /* meta channels are appended after all standard channels */
      meta_channel=MetaPixelChannels;
      for (i=0; i < (ssize_t) image->number_meta_channels; i++)
      {
        assert(meta_channel < MaxPixelChannels);
        SetPixelChannelAttributes(image,meta_channel,UpdatePixelTrait,n);
        meta_channel=(PixelChannel) (meta_channel+1);
        n++;
      }
    }
  assert(n < MaxPixelChannels);
  image->number_channels=(size_t) n;
  (void) SetPixelChannelMask(image,image->channel_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelChannel() applies a pixel interpolation method between a
% floating point coordinate and the pixels surrounding that coordinate. No
% pixel area resampling, or scaling of the result is performed.
%
% Interpolation is restricted to just the specified channel.
%
% The format of the InterpolatePixelChannel method is:
%
% MagickBooleanType InterpolatePixelChannel(
% const Image *magick_restrict image,const CacheView *image_view,
% const PixelChannel channel,const PixelInterpolateMethod method,
% const double x,const double y,double *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o image_view: the image view.
%
% o channel: the pixel channel to interpolate.
%
% o method: the pixel color interpolation method.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o pixel: return the interpolated pixel here.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void CatromWeights(const double x,double (*weights)[4])
{
  /*
    Compute the four 1D Catmull-Rom kernel weights for a sampling position x
    in [0,1), measured from the second of four consecutive input pixels.
    This is Nicolas Robidoux' 10 flops (4* + 5- + 1+) refactoring of the
    standard formulas, originally derived for the VIPS (Virtual Image
    Processing System) library.  The inner-weight recovery step below is
    valid for every Keys cubic, not only Catmull-Rom.
  */
  const double complement=(double) 1.0-x;
  const double scale=(double) (-0.5)*x*complement;
  (*weights)[0]=complement*scale;  /* outer-left weight */
  (*weights)[3]=x*scale;           /* outer-right weight */
  /* recover the two inner weights from the outer ones */
  const double skew=(*weights)[3]-(*weights)[0];
  (*weights)[1]=complement-(*weights)[0]+skew;
  (*weights)[2]=x-(*weights)[3]-skew;
}
static inline void SplineWeights(const double x,double (*weights)[4])
{
  /*
    Compute the four 1D cubic B-spline smoothing weights for a sampling
    position x in [0,1), measured from the second of four consecutive input
    pixels.  This is Nicolas Robidoux' 12 flops (6* + 5- + 1+) refactoring
    of the standard formulas.
  */
  const double complement=(double) 1.0-x;
  (*weights)[3]=(double) (1.0/6.0)*x*x*x;                            /* outer-right */
  (*weights)[0]=(double) (1.0/6.0)*complement*complement*complement; /* outer-left */
  /* recover the two inner weights from the outer ones */
  const double skew=(*weights)[3]-(*weights)[0];
  (*weights)[1]=complement-(*weights)[0]+skew;
  (*weights)[2]=x-(*weights)[3]-skew;
}
static inline double MeshInterpolate(const PointInfo *delta,const double p,
  const double x,const double y)
{
  /*
    Barycentric blend over one triangle of the mesh: delta->x weights sample
    x, delta->y weights sample y, and the remaining weight goes to p.
  */
  const double remainder=1.0-delta->x-delta->y;
  return(delta->x*x+delta->y*y+remainder*p);
}
MagickExport MagickBooleanType InterpolatePixelChannel(
  const Image *magick_restrict image,const CacheView_ *image_view,
  const PixelChannel channel,const PixelInterpolateMethod method,
  const double x,const double y,double *pixel,ExceptionInfo *exception)
{
  double
    alpha[16],
    gamma,
    pixels[16];
  MagickBooleanType
    status;
  PixelInterpolateMethod
    interpolate;
  PixelTrait
    traits;
  const Quantum
    *magick_restrict p;
  ssize_t
    i;
  ssize_t
    x_offset,
    y_offset;
  /*
    Resolve the interpolation method, gather the neighbourhood of virtual
    pixels that method needs, and blend them into *pixel.  For channels with
    BlendPixelTrait the samples are alpha-weighted (premultiplied) before
    blending and unweighted again via 'gamma' afterwards.  Returns
    MagickFalse only when the pixel cache cannot supply the neighbourhood.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image_view != (CacheView *) NULL);
  status=MagickTrue;
  *pixel=0.0;
  traits=GetPixelChannelTraits(image,channel);
  x_offset=CastDoubleToLong(floor(x));
  y_offset=CastDoubleToLong(floor(y));
  interpolate=method;
  if (interpolate == UndefinedInterpolatePixel)
    interpolate=image->interpolate;
  switch (interpolate)
  {
    case AverageInterpolatePixel:  /* nearest 4 neighbours */
    case Average9InterpolatePixel:  /* nearest 9 neighbours */
    case Average16InterpolatePixel:  /* nearest 16 neighbours */
    {
      ssize_t
        count;
      count=2;  /* size of the area to average - default nearest 4 */
      if (interpolate == Average9InterpolatePixel)
        {
          /* odd-sized window: center it on the rounded coordinate */
          count=3;
          x_offset=CastDoubleToLong(floor(x+0.5)-1.0);
          y_offset=CastDoubleToLong(floor(y+0.5)-1.0);
        }
      else
        if (interpolate == Average16InterpolatePixel)
          {
            count=4;
            x_offset--;
            y_offset--;
          }
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,(size_t) count,
        (size_t) count,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      count*=count;  /* Number of pixels to average */
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < (ssize_t) count; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < (ssize_t) count; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      /* uniform average; gamma undoes each sample's alpha weighting */
      for (i=0; i < (ssize_t) count; i++)
      {
        gamma=PerceptibleReciprocal(alpha[i])/count;
        *pixel+=gamma*pixels[i];
      }
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      PointInfo
        delta,
        epsilon;
      /* weighted average of the 2x2 neighbourhood */
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 4; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 4; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      delta.x=x-x_offset;
      delta.y=y-y_offset;
      epsilon.x=1.0-delta.x;
      epsilon.y=1.0-delta.y;
      gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y*
        (epsilon.x*alpha[2]+delta.x*alpha[3])));
      gamma=PerceptibleReciprocal(gamma);
      *pixel=gamma*(epsilon.y*(epsilon.x*pixels[0]+delta.x*pixels[1])+delta.y*
        (epsilon.x*pixels[2]+delta.x*pixels[3]));
      break;
    }
    case BlendInterpolatePixel:
    {
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 4; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(MagickRealType) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 4; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      gamma=1.0;  /* number of pixels blended together (it varies) */
      for (i=0; i <= 1L; i++) {
        if ((y-y_offset) >= 0.75)
          {
            alpha[i]=alpha[i+2];  /* take right pixels */
            pixels[i]=pixels[i+2];
          }
        else
          if ((y-y_offset) > 0.25)
            {
              gamma=2.0;  /* blend both pixels in row */
              alpha[i]+=alpha[i+2];  /* add up alpha weights */
              pixels[i]+=pixels[i+2];
            }
      }
      if ((x-x_offset) >= 0.75)
        {
          alpha[0]=alpha[1];  /* take bottom row blend */
          pixels[0]=pixels[1];
        }
      else
        if ((x-x_offset) > 0.25)
          {
            gamma*=2.0;  /* blend both rows */
            alpha[0]+=alpha[1];  /* add up alpha weights */
            pixels[0]+=pixels[1];
          }
      if (channel != AlphaPixelChannel)
        gamma=PerceptibleReciprocal(alpha[0]);  /* (color) 1/alpha_weights */
      else
        gamma=PerceptibleReciprocal(gamma);  /* (alpha) 1/number_of_pixels */
      *pixel=gamma*pixels[0];
      break;
    }
    case CatromInterpolatePixel:
    {
      double
        cx[4],
        cy[4];
      /* Catmull-Rom cubic over the surrounding 4x4 neighbourhood */
      p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 16; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 16; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      CatromWeights((double) (x-x_offset),&cx);
      CatromWeights((double) (y-y_offset),&cy);
      gamma=(channel == AlphaPixelChannel ? (double) 1.0 :
        PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
        alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
        alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
        alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
        cx[2]*alpha[14]+cx[3]*alpha[15])));
      *pixel=gamma*(cy[0]*(cx[0]*pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+
        cx[3]*pixels[3])+cy[1]*(cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*
        pixels[6]+cx[3]*pixels[7])+cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+
        cx[2]*pixels[10]+cx[3]*pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*
        pixels[13]+cx[2]*pixels[14]+cx[3]*pixels[15]));
      break;
    }
    case IntegerInterpolatePixel:
    {
      /* truncate to the pixel containing the coordinate (floor of x,y) */
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      *pixel=(double) GetPixelChannel(image,channel,p);
      break;
    }
    case NearestInterpolatePixel:
    {
      /* round to the nearest pixel center */
      x_offset=CastDoubleToLong(floor(x+0.5));
      y_offset=CastDoubleToLong(floor(y+0.5));
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      *pixel=(double) GetPixelChannel(image,channel,p);
      break;
    }
    case MeshInterpolatePixel:
    {
      PointInfo
        delta,
        luminance;
      /*
        Split the 2x2 neighbourhood into two triangles along the diagonal
        with the smaller luma difference, then interpolate within whichever
        triangle contains the sampling point.
      */
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 4; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 4; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      delta.x=x-x_offset;
      delta.y=y-y_offset;
      luminance.x=GetPixelLuma(image,p)-(double)
        GetPixelLuma(image,p+3*GetPixelChannels(image));
      luminance.y=GetPixelLuma(image,p+GetPixelChannels(image))-(double)
        GetPixelLuma(image,p+2*GetPixelChannels(image));
      if (fabs((double) luminance.x) < fabs((double) luminance.y))
        {
          /*
            Diagonal 0-3 NW-SE.
          */
          if (delta.x <= delta.y)
            {
              /*
                Bottom-left triangle (pixel: 2, diagonal: 0-3).
              */
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
              gamma=PerceptibleReciprocal(gamma);
              *pixel=gamma*MeshInterpolate(&delta,pixels[2],pixels[3],
                pixels[0]);
            }
          else
            {
              /*
                Top-right triangle (pixel: 1, diagonal: 0-3).
              */
              delta.x=1.0-delta.x;
              gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
              gamma=PerceptibleReciprocal(gamma);
              *pixel=gamma*MeshInterpolate(&delta,pixels[1],pixels[0],
                pixels[3]);
            }
        }
      else
        {
          /*
            Diagonal 1-2 NE-SW.
          */
          if (delta.x <= (1.0-delta.y))
            {
              /*
                Top-left triangle (pixel: 0, diagonal: 1-2).
              */
              gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
              gamma=PerceptibleReciprocal(gamma);
              *pixel=gamma*MeshInterpolate(&delta,pixels[0],pixels[1],
                pixels[2]);
            }
          else
            {
              /*
                Bottom-right triangle (pixel: 3, diagonal: 1-2).
              */
              delta.x=1.0-delta.x;
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
              gamma=PerceptibleReciprocal(gamma);
              *pixel=gamma*MeshInterpolate(&delta,pixels[3],pixels[2],
                pixels[1]);
            }
        }
      break;
    }
    case SplineInterpolatePixel:
    {
      double
        cx[4],
        cy[4];
      /* smoothing cubic B-spline over the surrounding 4x4 neighbourhood */
      p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 16; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 16; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      SplineWeights((double) (x-x_offset),&cx);
      SplineWeights((double) (y-y_offset),&cy);
      gamma=(channel == AlphaPixelChannel ? (double) 1.0 :
        PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
        alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
        alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
        alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
        cx[2]*alpha[14]+cx[3]*alpha[15])));
      *pixel=gamma*(cy[0]*(cx[0]*pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+
        cx[3]*pixels[3])+cy[1]*(cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*
        pixels[6]+cx[3]*pixels[7])+cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+
        cx[2]*pixels[10]+cx[3]*pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*
        pixels[13]+cx[2]*pixels[14]+cx[3]*pixels[15]));
      break;
    }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelChannels() applies a pixel interpolation method between a
% floating point coordinate and the pixels surrounding that coordinate. No
% pixel area resampling, or scaling of the result is performed.
%
% Interpolation is restricted to just the current channel setting of the
% destination image into which the color is to be stored.
%
% The format of the InterpolatePixelChannels method is:
%
% MagickBooleanType InterpolatePixelChannels(
% const Image *magick_restrict source,const CacheView *source_view,
% const Image *magick_restrict destination,
% const PixelInterpolateMethod method,const double x,const double y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o source: the source.
%
% o source_view: the source view.
%
% o destination: the destination image, for the interpolated color
%
% o method: the pixel color interpolation method.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o pixel: return the interpolated pixel here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType InterpolatePixelChannels(
  const Image *magick_restrict source,const CacheView_ *source_view,
  const Image *magick_restrict destination,const PixelInterpolateMethod method,
  const double x,const double y,Quantum *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  double
    alpha[16],
    gamma,
    pixels[16];
  const Quantum
    *magick_restrict p;
  ssize_t
    i;
  ssize_t
    x_offset,
    y_offset;
  PixelInterpolateMethod
    interpolate;
  /*
    Multi-channel variant of InterpolatePixelChannel(): resolves the
    interpolation method once, then for each source channel that is also
    defined in the destination, blends the neighbourhood and stores the
    clamped result into the destination pixel buffer.  Returns MagickFalse
    only when the pixel cache cannot supply the neighbourhood.
  */
  assert(source != (Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(source_view != (CacheView *) NULL);
  status=MagickTrue;
  x_offset=CastDoubleToLong(floor(x));
  y_offset=CastDoubleToLong(floor(y));
  interpolate=method;
  if (interpolate == UndefinedInterpolatePixel)
    interpolate=source->interpolate;
  switch (interpolate)
  {
    case AverageInterpolatePixel:  /* nearest 4 neighbours */
    case Average9InterpolatePixel:  /* nearest 9 neighbours */
    case Average16InterpolatePixel:  /* nearest 16 neighbours */
    {
      ssize_t
        count;
      count=2;  /* size of the area to average - default nearest 4 */
      if (interpolate == Average9InterpolatePixel)
        {
          /* odd-sized window: center it on the rounded coordinate */
          count=3;
          x_offset=CastDoubleToLong(floor(x+0.5)-1.0);
          y_offset=CastDoubleToLong(floor(y+0.5)-1.0);
        }
      else
        if (interpolate == Average16InterpolatePixel)
          {
            count=4;
            x_offset--;
            y_offset--;
          }
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,(size_t) count,
        (size_t) count,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      count*=count;  /* Number of pixels to average */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        double
          sum;
        ssize_t
          j;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        /* skip channels absent from either source or destination */
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        for (j=0; j < (ssize_t) count; j++)
          pixels[j]=(double) p[j*GetPixelChannels(source)+i];
        sum=0.0;
        if ((traits & BlendPixelTrait) == 0)
          {
            /* non-blending channel: plain arithmetic mean */
            for (j=0; j < (ssize_t) count; j++)
              sum+=pixels[j];
            sum/=count;
            SetPixelChannel(destination,channel,ClampToQuantum(sum),pixel);
            continue;
          }
        for (j=0; j < (ssize_t) count; j++)
        {
          alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
            GetPixelChannels(source));
          pixels[j]*=alpha[j];
          gamma=PerceptibleReciprocal(alpha[j]);
          sum+=gamma*pixels[j];
        }
        sum/=count;
        SetPixelChannel(destination,channel,ClampToQuantum(sum),pixel);
      }
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      /* weighted average of the 2x2 neighbourhood */
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PointInfo
          delta,
          epsilon;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        delta.x=x-x_offset;
        delta.y=y-y_offset;
        epsilon.x=1.0-delta.x;
        epsilon.y=1.0-delta.y;
        pixels[0]=(double) p[i];
        pixels[1]=(double) p[GetPixelChannels(source)+i];
        pixels[2]=(double) p[2*GetPixelChannels(source)+i];
        pixels[3]=(double) p[3*GetPixelChannels(source)+i];
        if ((traits & BlendPixelTrait) == 0)
          {
            gamma=((epsilon.y*(epsilon.x+delta.x)+delta.y*(epsilon.x+delta.x)));
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(destination,channel,ClampToQuantum(gamma*(epsilon.y*
              (epsilon.x*pixels[0]+delta.x*pixels[1])+delta.y*(epsilon.x*
              pixels[2]+delta.x*pixels[3]))),pixel);
            continue;
          }
        /* blending channel: premultiply, blend, then unweight via gamma */
        alpha[0]=QuantumScale*GetPixelAlpha(source,p);
        alpha[1]=QuantumScale*GetPixelAlpha(source,p+GetPixelChannels(source));
        alpha[2]=QuantumScale*GetPixelAlpha(source,p+2*
          GetPixelChannels(source));
        alpha[3]=QuantumScale*GetPixelAlpha(source,p+3*
          GetPixelChannels(source));
        pixels[0]*=alpha[0];
        pixels[1]*=alpha[1];
        pixels[2]*=alpha[2];
        pixels[3]*=alpha[3];
        gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y*
          (epsilon.x*alpha[2]+delta.x*alpha[3])));
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*(epsilon.y*
          (epsilon.x*pixels[0]+delta.x*pixels[1])+delta.y*(epsilon.x*pixels[2]+
          delta.x*pixels[3]))),pixel);
      }
      break;
    }
    case BlendInterpolatePixel:
    {
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        ssize_t
          j;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        if (source->alpha_trait != BlendPixelTrait)
          for (j=0; j < 4; j++)
          {
            alpha[j]=1.0;
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
          }
        else
          for (j=0; j < 4; j++)
          {
            alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
              GetPixelChannels(source));
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
            /* the alpha channel itself is never premultiplied */
            if (channel != AlphaPixelChannel)
              pixels[j]*=alpha[j];
          }
        gamma=1.0;  /* number of pixels blended together (it varies) */
        for (j=0; j <= 1L; j++)
        {
          if ((y-y_offset) >= 0.75)
            {
              alpha[j]=alpha[j+2];  /* take right pixels */
              pixels[j]=pixels[j+2];
            }
          else
            if ((y-y_offset) > 0.25)
              {
                gamma=2.0;  /* blend both pixels in row */
                alpha[j]+=alpha[j+2];  /* add up alpha weights */
                pixels[j]+=pixels[j+2];
              }
        }
        if ((x-x_offset) >= 0.75)
          {
            alpha[0]=alpha[1];  /* take bottom row blend */
            pixels[0]=pixels[1];
          }
        else
          if ((x-x_offset) > 0.25)
            {
              gamma*=2.0;  /* blend both rows */
              alpha[0]+=alpha[1];  /* add up alpha weights */
              pixels[0]+=pixels[1];
            }
        if (channel != AlphaPixelChannel)
          gamma=PerceptibleReciprocal(alpha[0]);  /* (color) 1/alpha_weights */
        else
          gamma=PerceptibleReciprocal(gamma);  /* (alpha) 1/number_of_pixels */
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*pixels[0]),
          pixel);
      }
      break;
    }
    case CatromInterpolatePixel:
    {
      double
        cx[4],
        cy[4];
      /* Catmull-Rom cubic over the surrounding 4x4 neighbourhood */
      p=GetCacheViewVirtualPixels(source_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        ssize_t
          j;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        if ((traits & BlendPixelTrait) == 0)
          for (j=0; j < 16; j++)
          {
            alpha[j]=1.0;
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
          }
        else
          for (j=0; j < 16; j++)
          {
            alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
              GetPixelChannels(source));
            pixels[j]=alpha[j]*p[j*GetPixelChannels(source)+i];
          }
        CatromWeights((double) (x-x_offset),&cx);
        CatromWeights((double) (y-y_offset),&cy);
        /*
          NOTE(review): the single-channel variant (InterpolatePixelChannel)
          keys this gamma selection on channel == AlphaPixelChannel, not on
          traits & BlendPixelTrait -- confirm this difference is intended.
        */
        gamma=((traits & BlendPixelTrait) ? (double) (1.0) :
          PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
          alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
          alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
          alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
          cx[2]*alpha[14]+cx[3]*alpha[15])));
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*(cy[0]*(cx[0]*
          pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+cx[3]*pixels[3])+cy[1]*
          (cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*pixels[6]+cx[3]*pixels[7])+
          cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+cx[2]*pixels[10]+cx[3]*
          pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*pixels[13]+cx[2]*
          pixels[14]+cx[3]*pixels[15]))),pixel);
      }
      break;
    }
    case IntegerInterpolatePixel:
    {
      /* truncate to the pixel containing the coordinate (floor of x,y) */
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],pixel);
      }
      break;
    }
    case NearestInterpolatePixel:
    {
      /* round to the nearest pixel center */
      x_offset=CastDoubleToLong(floor(x+0.5));
      y_offset=CastDoubleToLong(floor(y+0.5));
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],pixel);
      }
      break;
    }
    case MeshInterpolatePixel:
    {
      /*
        Split the 2x2 neighbourhood into two triangles along the diagonal
        with the smaller luma difference, then interpolate within whichever
        triangle contains the sampling point.
      */
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PointInfo
          delta,
          luminance;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        pixels[0]=(double) p[i];
        pixels[1]=(double) p[GetPixelChannels(source)+i];
        pixels[2]=(double) p[2*GetPixelChannels(source)+i];
        pixels[3]=(double) p[3*GetPixelChannels(source)+i];
        if ((traits & BlendPixelTrait) == 0)
          {
            alpha[0]=1.0;
            alpha[1]=1.0;
            alpha[2]=1.0;
            alpha[3]=1.0;
          }
        else
          {
            alpha[0]=QuantumScale*GetPixelAlpha(source,p);
            alpha[1]=QuantumScale*GetPixelAlpha(source,p+
              GetPixelChannels(source));
            alpha[2]=QuantumScale*GetPixelAlpha(source,p+2*
              GetPixelChannels(source));
            alpha[3]=QuantumScale*GetPixelAlpha(source,p+3*
              GetPixelChannels(source));
          }
        delta.x=x-x_offset;
        delta.y=y-y_offset;
        luminance.x=fabs((double) (GetPixelLuma(source,p)-
          GetPixelLuma(source,p+3*GetPixelChannels(source))));
        luminance.y=fabs((double) (GetPixelLuma(source,p+
          GetPixelChannels(source))-GetPixelLuma(source,p+2*
          GetPixelChannels(source))));
        if (luminance.x < luminance.y)
          {
            /*
              Diagonal 0-3 NW-SE.
            */
            if (delta.x <= delta.y)
              {
                /*
                  Bottom-left triangle (pixel: 2, diagonal: 0-3).
                */
                delta.y=1.0-delta.y;
                gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[2],pixels[3],pixels[0])),pixel);
              }
            else
              {
                /*
                  Top-right triangle (pixel: 1, diagonal: 0-3).
                */
                delta.x=1.0-delta.x;
                gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[1],pixels[0],pixels[3])),pixel);
              }
          }
        else
          {
            /*
              Diagonal 1-2 NE-SW.
            */
            if (delta.x <= (1.0-delta.y))
              {
                /*
                  Top-left triangle (pixel: 0, diagonal: 1-2).
                */
                gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[0],pixels[1],pixels[2])),pixel);
              }
            else
              {
                /*
                  Bottom-right triangle (pixel: 3, diagonal: 1-2).
                */
                delta.x=1.0-delta.x;
                delta.y=1.0-delta.y;
                gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[3],pixels[2],pixels[1])),pixel);
              }
          }
      }
      break;
    }
    case SplineInterpolatePixel:
    {
      double
        cx[4],
        cy[4];
      /* smoothing cubic B-spline over the surrounding 4x4 neighbourhood */
      p=GetCacheViewVirtualPixels(source_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        ssize_t
          j;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        if ((traits & BlendPixelTrait) == 0)
          for (j=0; j < 16; j++)
          {
            alpha[j]=1.0;
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
          }
        else
          for (j=0; j < 16; j++)
          {
            alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
              GetPixelChannels(source));
            pixels[j]=alpha[j]*p[j*GetPixelChannels(source)+i];
          }
        SplineWeights((double) (x-x_offset),&cx);
        SplineWeights((double) (y-y_offset),&cy);
        /*
          NOTE(review): as in the Catrom case, the single-channel variant
          keys this on channel == AlphaPixelChannel -- confirm intended.
        */
        gamma=((traits & BlendPixelTrait) ? (double) (1.0) :
          PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
          alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
          alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
          alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
          cx[2]*alpha[14]+cx[3]*alpha[15])));
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*(cy[0]*(cx[0]*
          pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+cx[3]*pixels[3])+cy[1]*
          (cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*pixels[6]+cx[3]*pixels[7])+
          cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+cx[2]*pixels[10]+cx[3]*
          pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*pixels[13]+cx[2]*
          pixels[14]+cx[3]*pixels[15]))),pixel);
      }
      break;
    }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelInfo() applies a pixel interpolation method between a
% floating point coordinate and the pixels surrounding that coordinate. No
% pixel area resampling, or scaling of the result is performed.
%
% Interpolation is restricted to just RGBKA channels.
%
% The format of the InterpolatePixelInfo method is:
%
% MagickBooleanType InterpolatePixelInfo(const Image *image,
% const CacheView *image_view,const PixelInterpolateMethod method,
% const double x,const double y,PixelInfo *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o image_view: the image view.
%
% o method: the pixel color interpolation method.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o pixel: return the interpolated pixel here.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Load one pixel into a PixelInfo with its color channels premultiplied by
  the normalized alpha; the weight itself is returned through *alpha so that
  callers can divide a blended sum by the accumulated coverage afterwards.
  Images without an alpha channel get a weight of 1.0 (unscaled channels).
*/
static inline void AlphaBlendPixelInfo(const Image *image,
  const Quantum *pixel,PixelInfo *pixel_info,double *alpha)
{
  double
    weight;

  weight=1.0;
  if (image->alpha_trait != UndefinedPixelTrait)
    weight=QuantumScale*GetPixelAlpha(image,pixel);
  *alpha=weight;
  pixel_info->red=weight*GetPixelRed(image,pixel);
  pixel_info->green=weight*GetPixelGreen(image,pixel);
  pixel_info->blue=weight*GetPixelBlue(image,pixel);
  pixel_info->black=0.0;
  if (image->colorspace == CMYKColorspace)
    pixel_info->black=weight*GetPixelBlack(image,pixel);
  /* alpha itself is stored unpremultiplied. */
  pixel_info->alpha=(double) GetPixelAlpha(image,pixel);
}
MagickExport MagickBooleanType InterpolatePixelInfo(const Image *image,
  const CacheView_ *image_view,const PixelInterpolateMethod method,
  const double x,const double y,PixelInfo *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  double
    alpha[16],     /* per-neighbour normalized alpha weights */
    gamma;         /* reciprocal of the blended alpha, un-premultiplies result */

  PixelInfo
    pixels[16];    /* alpha-premultiplied neighbourhood (up to 4x4) */

  const Quantum
    *p;

  ssize_t
    i;

  ssize_t
    x_offset,
    y_offset;

  PixelInterpolateMethod
    interpolate;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image_view != (CacheView *) NULL);
  status=MagickTrue;
  /*
    Anchor the neighbourhood at the integer cell containing (x,y).
  */
  x_offset=CastDoubleToLong(floor(x));
  y_offset=CastDoubleToLong(floor(y));
  interpolate=method;
  if (interpolate == UndefinedInterpolatePixel)
    interpolate=image->interpolate;  /* fall back to the image's own setting */
  GetPixelInfoPixel(image,(const Quantum *) NULL,pixel);
  (void) memset(&pixels,0,sizeof(pixels));
  switch (interpolate)
  {
    case AverageInterpolatePixel:  /* nearest 4 neighbours */
    case Average9InterpolatePixel:  /* nearest 9 neighbours */
    case Average16InterpolatePixel:  /* nearest 16 neighbours */
    {
      ssize_t
        count;

      count=2;  /* size of the area to average - default nearest 4 */
      if (interpolate == Average9InterpolatePixel)
        {
          /* odd-sized area: center it on the nearest pixel */
          count=3;
          x_offset=CastDoubleToLong(floor(x+0.5)-1.0);
          y_offset=CastDoubleToLong(floor(y+0.5)-1.0);
        }
      else if (interpolate == Average16InterpolatePixel)
        {
          count=4;
          x_offset--;
          y_offset--;
        }
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,(size_t) count,
        (size_t) count,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      count*=count;  /* number of pixels - square of size */
      for (i=0; i < (ssize_t) count; i++)
      {
        /*
          Un-premultiply each neighbour individually, then accumulate.
        */
        AlphaBlendPixelInfo(image,p,pixels,alpha);
        gamma=PerceptibleReciprocal(alpha[0]);
        pixel->red+=gamma*pixels[0].red;
        pixel->green+=gamma*pixels[0].green;
        pixel->blue+=gamma*pixels[0].blue;
        pixel->black+=gamma*pixels[0].black;
        pixel->alpha+=pixels[0].alpha;
        p += GetPixelChannels(image);
      }
      gamma=1.0/count;  /* average weighting of each pixel in area */
      pixel->red*=gamma;
      pixel->green*=gamma;
      pixel->blue*=gamma;
      pixel->black*=gamma;
      pixel->alpha*=gamma;
      break;
    }
    case BackgroundInterpolatePixel:
    {
      *pixel=image->background_color;  /* Copy PixelInfo Structure  */
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      PointInfo
        delta,    /* fractional position inside the 2x2 cell */
        epsilon;  /* complementary weights (1 - delta) */

      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < 4L; i++)
        AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
      delta.x=x-x_offset;
      delta.y=y-y_offset;
      epsilon.x=1.0-delta.x;
      epsilon.y=1.0-delta.y;
      /*
        Colors were alpha-premultiplied; gamma restores unassociated values.
      */
      gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y*
        (epsilon.x*alpha[2]+delta.x*alpha[3])));
      gamma=PerceptibleReciprocal(gamma);
      pixel->red=gamma*(epsilon.y*(epsilon.x*pixels[0].red+delta.x*
        pixels[1].red)+delta.y*(epsilon.x*pixels[2].red+delta.x*pixels[3].red));
      pixel->green=gamma*(epsilon.y*(epsilon.x*pixels[0].green+delta.x*
        pixels[1].green)+delta.y*(epsilon.x*pixels[2].green+delta.x*
        pixels[3].green));
      pixel->blue=gamma*(epsilon.y*(epsilon.x*pixels[0].blue+delta.x*
        pixels[1].blue)+delta.y*(epsilon.x*pixels[2].blue+delta.x*
        pixels[3].blue));
      if (image->colorspace == CMYKColorspace)
        pixel->black=gamma*(epsilon.y*(epsilon.x*pixels[0].black+delta.x*
          pixels[1].black)+delta.y*(epsilon.x*pixels[2].black+delta.x*
          pixels[3].black));
      /* alpha is interpolated with unit weights (sum is 1.0 by construction) */
      gamma=((epsilon.y*(epsilon.x+delta.x)+delta.y*(epsilon.x+delta.x)));
      gamma=PerceptibleReciprocal(gamma);
      pixel->alpha=gamma*(epsilon.y*(epsilon.x*pixels[0].alpha+delta.x*
        pixels[1].alpha)+delta.y*(epsilon.x*pixels[2].alpha+delta.x*
        pixels[3].alpha));
      break;
    }
    case BlendInterpolatePixel:
    {
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < 4L; i++)
      {
        /*
          GetPixelInfoPixel seeds the non-channel fields (colorspace, fuzz);
          AlphaBlendPixelInfo then overwrites the channels with
          alpha-premultiplied values.
        */
        GetPixelInfoPixel(image,p+i*GetPixelChannels(image),pixels+i);
        AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
      }
      gamma=1.0;  /* number of pixels blended together (its variable) */
      for (i=0; i <= 1L; i++)
      {
        /* collapse the two rows into row 0/1 (pixels i and i+2 share column i) */
        if ((y-y_offset) >= 0.75)
          {
            alpha[i]=alpha[i+2];  /* take the bottom-row pixels */
            pixels[i]=pixels[i+2];
          }
        else
          if ((y-y_offset) > 0.25)
            {
              gamma=2.0;  /* blend the two rows */
              alpha[i]+=alpha[i+2];  /* add up alpha weights */
              pixels[i].red+=pixels[i+2].red;
              pixels[i].green+=pixels[i+2].green;
              pixels[i].blue+=pixels[i+2].blue;
              pixels[i].black+=pixels[i+2].black;
              pixels[i].alpha+=pixels[i+2].alpha;
            }
      }
      /* now collapse the two remaining columns into pixel 0 */
      if ((x-x_offset) >= 0.75)
        {
          alpha[0]=alpha[1];  /* take the right-column pixel */
          pixels[0]=pixels[1];
        }
      else
        if ((x-x_offset) > 0.25)
          {
            gamma*=2.0;  /* blend the two columns */
            alpha[0]+= alpha[1];  /* add up alpha weights */
            pixels[0].red+=pixels[1].red;
            pixels[0].green+=pixels[1].green;
            pixels[0].blue+=pixels[1].blue;
            pixels[0].black+=pixels[1].black;
            pixels[0].alpha+=pixels[1].alpha;
          }
      gamma=1.0/gamma;
      alpha[0]=PerceptibleReciprocal(alpha[0]);
      pixel->red=alpha[0]*pixels[0].red;
      pixel->green=alpha[0]*pixels[0].green;  /* divide by sum of alpha */
      pixel->blue=alpha[0]*pixels[0].blue;
      pixel->black=alpha[0]*pixels[0].black;
      pixel->alpha=gamma*pixels[0].alpha;  /* divide by number of pixels */
      break;
    }
    case CatromInterpolatePixel:
    {
      double
        cx[4],   /* Catmull-Rom weights in x */
        cy[4];   /* Catmull-Rom weights in y */

      /* 4x4 neighbourhood centered on the cell containing (x,y) */
      p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < 16L; i++)
        AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
      CatromWeights((double) (x-x_offset),&cx);
      CatromWeights((double) (y-y_offset),&cy);
      /* separable tensor product: rows weighted by cy, columns by cx */
      pixel->red=(cy[0]*(cx[0]*pixels[0].red+cx[1]*pixels[1].red+cx[2]*
        pixels[2].red+cx[3]*pixels[3].red)+cy[1]*(cx[0]*pixels[4].red+cx[1]*
        pixels[5].red+cx[2]*pixels[6].red+cx[3]*pixels[7].red)+cy[2]*(cx[0]*
        pixels[8].red+cx[1]*pixels[9].red+cx[2]*pixels[10].red+cx[3]*
        pixels[11].red)+cy[3]*(cx[0]*pixels[12].red+cx[1]*pixels[13].red+cx[2]*
        pixels[14].red+cx[3]*pixels[15].red));
      pixel->green=(cy[0]*(cx[0]*pixels[0].green+cx[1]*pixels[1].green+cx[2]*
        pixels[2].green+cx[3]*pixels[3].green)+cy[1]*(cx[0]*pixels[4].green+
        cx[1]*pixels[5].green+cx[2]*pixels[6].green+cx[3]*pixels[7].green)+
        cy[2]*(cx[0]*pixels[8].green+cx[1]*pixels[9].green+cx[2]*
        pixels[10].green+cx[3]*pixels[11].green)+cy[3]*(cx[0]*
        pixels[12].green+cx[1]*pixels[13].green+cx[2]*pixels[14].green+cx[3]*
        pixels[15].green));
      pixel->blue=(cy[0]*(cx[0]*pixels[0].blue+cx[1]*pixels[1].blue+cx[2]*
        pixels[2].blue+cx[3]*pixels[3].blue)+cy[1]*(cx[0]*pixels[4].blue+cx[1]*
        pixels[5].blue+cx[2]*pixels[6].blue+cx[3]*pixels[7].blue)+cy[2]*(cx[0]*
        pixels[8].blue+cx[1]*pixels[9].blue+cx[2]*pixels[10].blue+cx[3]*
        pixels[11].blue)+cy[3]*(cx[0]*pixels[12].blue+cx[1]*pixels[13].blue+
        cx[2]*pixels[14].blue+cx[3]*pixels[15].blue));
      if (image->colorspace == CMYKColorspace)
        pixel->black=(cy[0]*(cx[0]*pixels[0].black+cx[1]*pixels[1].black+cx[2]*
          pixels[2].black+cx[3]*pixels[3].black)+cy[1]*(cx[0]*pixels[4].black+
          cx[1]*pixels[5].black+cx[2]*pixels[6].black+cx[3]*pixels[7].black)+
          cy[2]*(cx[0]*pixels[8].black+cx[1]*pixels[9].black+cx[2]*
          pixels[10].black+cx[3]*pixels[11].black)+cy[3]*(cx[0]*
          pixels[12].black+cx[1]*pixels[13].black+cx[2]*pixels[14].black+cx[3]*
          pixels[15].black));
      pixel->alpha=(cy[0]*(cx[0]*pixels[0].alpha+cx[1]*pixels[1].alpha+cx[2]*
        pixels[2].alpha+cx[3]*pixels[3].alpha)+cy[1]*(cx[0]*pixels[4].alpha+
        cx[1]*pixels[5].alpha+cx[2]*pixels[6].alpha+cx[3]*pixels[7].alpha)+
        cy[2]*(cx[0]*pixels[8].alpha+cx[1]*pixels[9].alpha+cx[2]*
        pixels[10].alpha+cx[3]*pixels[11].alpha)+cy[3]*(cx[0]*pixels[12].alpha+
        cx[1]*pixels[13].alpha+cx[2]*pixels[14].alpha+cx[3]*pixels[15].alpha));
      break;
    }
    case IntegerInterpolatePixel:
    {
      /* truncate toward the floor pixel; no blending */
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      GetPixelInfoPixel(image,p,pixel);
      break;
    }
    case MeshInterpolatePixel:
    {
      PointInfo
        delta,
        luminance;

      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      delta.x=x-x_offset;
      delta.y=y-y_offset;
      /*
        Pick the diagonal with the smaller luminance change, then
        linearly interpolate within the matching triangle.
      */
      luminance.x=GetPixelLuma(image,p)-(double)
        GetPixelLuma(image,p+3*GetPixelChannels(image));
      luminance.y=GetPixelLuma(image,p+GetPixelChannels(image))-(double)
        GetPixelLuma(image,p+2*GetPixelChannels(image));
      AlphaBlendPixelInfo(image,p,pixels+0,alpha+0);
      AlphaBlendPixelInfo(image,p+GetPixelChannels(image),pixels+1,alpha+1);
      AlphaBlendPixelInfo(image,p+2*GetPixelChannels(image),pixels+2,alpha+2);
      AlphaBlendPixelInfo(image,p+3*GetPixelChannels(image),pixels+3,alpha+3);
      if (fabs((double) luminance.x) < fabs((double) luminance.y))
        {
          /*
            Diagonal 0-3 NW-SE.
          */
          if (delta.x <= delta.y)
            {
              /*
                Bottom-left triangle (pixel: 2, diagonal: 0-3).
              */
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
              gamma=PerceptibleReciprocal(gamma);
              pixel->red=gamma*MeshInterpolate(&delta,pixels[2].red,
                pixels[3].red,pixels[0].red);
              pixel->green=gamma*MeshInterpolate(&delta,pixels[2].green,
                pixels[3].green,pixels[0].green);
              pixel->blue=gamma*MeshInterpolate(&delta,pixels[2].blue,
                pixels[3].blue,pixels[0].blue);
              if (image->colorspace == CMYKColorspace)
                pixel->black=gamma*MeshInterpolate(&delta,pixels[2].black,
                  pixels[3].black,pixels[0].black);
              gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
              pixel->alpha=gamma*MeshInterpolate(&delta,pixels[2].alpha,
                pixels[3].alpha,pixels[0].alpha);
            }
          else
            {
              /*
                Top-right triangle (pixel:1 , diagonal: 0-3).
              */
              delta.x=1.0-delta.x;
              gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
              gamma=PerceptibleReciprocal(gamma);
              pixel->red=gamma*MeshInterpolate(&delta,pixels[1].red,
                pixels[0].red,pixels[3].red);
              pixel->green=gamma*MeshInterpolate(&delta,pixels[1].green,
                pixels[0].green,pixels[3].green);
              pixel->blue=gamma*MeshInterpolate(&delta,pixels[1].blue,
                pixels[0].blue,pixels[3].blue);
              if (image->colorspace == CMYKColorspace)
                pixel->black=gamma*MeshInterpolate(&delta,pixels[1].black,
                  pixels[0].black,pixels[3].black);
              gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
              pixel->alpha=gamma*MeshInterpolate(&delta,pixels[1].alpha,
                pixels[0].alpha,pixels[3].alpha);
            }
        }
      else
        {
          /*
            Diagonal 1-2 NE-SW.
          */
          if (delta.x <= (1.0-delta.y))
            {
              /*
                Top-left triangle (pixel: 0, diagonal: 1-2).
              */
              gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
              gamma=PerceptibleReciprocal(gamma);
              pixel->red=gamma*MeshInterpolate(&delta,pixels[0].red,
                pixels[1].red,pixels[2].red);
              pixel->green=gamma*MeshInterpolate(&delta,pixels[0].green,
                pixels[1].green,pixels[2].green);
              pixel->blue=gamma*MeshInterpolate(&delta,pixels[0].blue,
                pixels[1].blue,pixels[2].blue);
              if (image->colorspace == CMYKColorspace)
                pixel->black=gamma*MeshInterpolate(&delta,pixels[0].black,
                  pixels[1].black,pixels[2].black);
              gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
              pixel->alpha=gamma*MeshInterpolate(&delta,pixels[0].alpha,
                pixels[1].alpha,pixels[2].alpha);
            }
          else
            {
              /*
                Bottom-right triangle (pixel: 3, diagonal: 1-2).
              */
              delta.x=1.0-delta.x;
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
              gamma=PerceptibleReciprocal(gamma);
              pixel->red=gamma*MeshInterpolate(&delta,pixels[3].red,
                pixels[2].red,pixels[1].red);
              pixel->green=gamma*MeshInterpolate(&delta,pixels[3].green,
                pixels[2].green,pixels[1].green);
              pixel->blue=gamma*MeshInterpolate(&delta,pixels[3].blue,
                pixels[2].blue,pixels[1].blue);
              if (image->colorspace == CMYKColorspace)
                pixel->black=gamma*MeshInterpolate(&delta,pixels[3].black,
                  pixels[2].black,pixels[1].black);
              gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
              pixel->alpha=gamma*MeshInterpolate(&delta,pixels[3].alpha,
                pixels[2].alpha,pixels[1].alpha);
            }
        }
      break;
    }
    case NearestInterpolatePixel:
    {
      /* round to the nearest pixel center; no blending */
      x_offset=CastDoubleToLong(floor(x+0.5));
      y_offset=CastDoubleToLong(floor(y+0.5));
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      GetPixelInfoPixel(image,p,pixel);
      break;
    }
    case SplineInterpolatePixel:
    {
      double
        cx[4],   /* cubic B-spline weights in x */
        cy[4];   /* cubic B-spline weights in y */

      p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < 16L; i++)
        AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
      SplineWeights((double) (x-x_offset),&cx);
      SplineWeights((double) (y-y_offset),&cy);
      /* separable tensor product, same layout as the Catrom case */
      pixel->red=(cy[0]*(cx[0]*pixels[0].red+cx[1]*pixels[1].red+cx[2]*
        pixels[2].red+cx[3]*pixels[3].red)+cy[1]*(cx[0]*pixels[4].red+cx[1]*
        pixels[5].red+cx[2]*pixels[6].red+cx[3]*pixels[7].red)+cy[2]*(cx[0]*
        pixels[8].red+cx[1]*pixels[9].red+cx[2]*pixels[10].red+cx[3]*
        pixels[11].red)+cy[3]*(cx[0]*pixels[12].red+cx[1]*pixels[13].red+cx[2]*
        pixels[14].red+cx[3]*pixels[15].red));
      pixel->green=(cy[0]*(cx[0]*pixels[0].green+cx[1]*pixels[1].green+cx[2]*
        pixels[2].green+cx[3]*pixels[3].green)+cy[1]*(cx[0]*pixels[4].green+
        cx[1]*pixels[5].green+cx[2]*pixels[6].green+cx[3]*pixels[7].green)+
        cy[2]*(cx[0]*pixels[8].green+cx[1]*pixels[9].green+cx[2]*
        pixels[10].green+cx[3]*pixels[11].green)+cy[3]*(cx[0]*pixels[12].green+
        cx[1]*pixels[13].green+cx[2]*pixels[14].green+cx[3]*pixels[15].green));
      pixel->blue=(cy[0]*(cx[0]*pixels[0].blue+cx[1]*pixels[1].blue+cx[2]*
        pixels[2].blue+cx[3]*pixels[3].blue)+cy[1]*(cx[0]*pixels[4].blue+cx[1]*
        pixels[5].blue+cx[2]*pixels[6].blue+cx[3]*pixels[7].blue)+cy[2]*(cx[0]*
        pixels[8].blue+cx[1]*pixels[9].blue+cx[2]*pixels[10].blue+cx[3]*
        pixels[11].blue)+cy[3]*(cx[0]*pixels[12].blue+cx[1]*pixels[13].blue+
        cx[2]*pixels[14].blue+cx[3]*pixels[15].blue));
      if (image->colorspace == CMYKColorspace)
        pixel->black=(cy[0]*(cx[0]*pixels[0].black+cx[1]*pixels[1].black+cx[2]*
          pixels[2].black+cx[3]*pixels[3].black)+cy[1]*(cx[0]*pixels[4].black+
          cx[1]*pixels[5].black+cx[2]*pixels[6].black+cx[3]*pixels[7].black)+
          cy[2]*(cx[0]*pixels[8].black+cx[1]*pixels[9].black+cx[2]*
          pixels[10].black+cx[3]*pixels[11].black)+cy[3]*(cx[0]*
          pixels[12].black+cx[1]*pixels[13].black+cx[2]*pixels[14].black+cx[3]*
          pixels[15].black));
      pixel->alpha=(cy[0]*(cx[0]*pixels[0].alpha+cx[1]*pixels[1].alpha+cx[2]*
        pixels[2].alpha+cx[3]*pixels[3].alpha)+cy[1]*(cx[0]*pixels[4].alpha+
        cx[1]*pixels[5].alpha+cx[2]*pixels[6].alpha+cx[3]*pixels[7].alpha)+
        cy[2]*(cx[0]*pixels[8].alpha+cx[1]*pixels[9].alpha+cx[2]*
        pixels[10].alpha+cx[3]*pixels[11].alpha)+cy[3]*(cx[0]*pixels[12].alpha+
        cx[1]*pixels[13].alpha+cx[2]*pixels[14].alpha+cx[3]*pixels[15].alpha));
      break;
    }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I s F u z z y E q u i v a l e n c e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsFuzzyEquivalencePixel() returns MagickTrue if the distance between two
% pixels is less than the specified distance in a linear three (or four)
% dimensional color space.
%
% The format of the IsFuzzyEquivalencePixel method is:
%
% void IsFuzzyEquivalencePixel(const Image *source,const Quantum *p,
% const Image *destination,const Quantum *q)
%
% A description of each parameter follows:
%
% o source: the source image.
%
% o p: Pixel p.
%
% o destination: the destination image.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType IsFuzzyEquivalencePixel(const Image *source,
  const Quantum *p,const Image *destination,const Quantum *q)
{
  double
    cone_scale,
    delta,
    metric,
    threshold;

  /*
    Squared fuzz threshold shared by both images.
  */
  threshold=GetFuzzyColorDistance(source,destination);
  cone_scale=1.0;
  metric=0.0;
  if ((source->alpha_trait != UndefinedPixelTrait) ||
      (destination->alpha_trait != UndefinedPixelTrait))
    {
      /*
        Transparency participates: compare the alpha channel first and
        bail out early when it alone exceeds the threshold.
      */
      delta=GetPixelAlpha(source,p)-(double) GetPixelAlpha(destination,q);
      metric=delta*delta;
      if (metric > threshold)
        return(MagickFalse);
      /*
        Scale the color terms by the combined opacity, forming a 4D cone:
        (nearly) transparent pixels match regardless of their color.
      */
      if (source->alpha_trait != UndefinedPixelTrait)
        cone_scale*=QuantumScale*GetPixelAlpha(source,p);
      if (destination->alpha_trait != UndefinedPixelTrait)
        cone_scale*=QuantumScale*GetPixelAlpha(destination,q);
      if (cone_scale <= MagickEpsilon)
        return(MagickTrue);
    }
  /*
    RGB or CMY color cube: rescale so the per-channel fuzz matches the
    3-channel metric.
  */
  metric*=3.0;
  threshold*=3.0;
  delta=GetPixelRed(source,p)-(double) GetPixelRed(destination,q);
  if (IsHueCompatibleColorspace(source->colorspace) != MagickFalse)
    {
      /*
        The red channel carries hue here; fold the difference so the arc
        distance wraps around (hue is cyclic, not linear).
      */
      if (fabs((double) delta) > (QuantumRange/2))
        delta-=QuantumRange;
      delta*=2.0;
    }
  metric+=cone_scale*delta*delta;
  if (metric > threshold)
    return(MagickFalse);
  delta=GetPixelGreen(source,p)-(double) GetPixelGreen(destination,q);
  metric+=cone_scale*delta*delta;
  if (metric > threshold)
    return(MagickFalse);
  delta=GetPixelBlue(source,p)-(double) GetPixelBlue(destination,q);
  metric+=cone_scale*delta*delta;
  if (metric > threshold)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I s F u z z y E q u i v a l e n c e P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsFuzzyEquivalencePixelInfo() returns true if the distance between two
% colors is less than the specified distance in a linear three (or four)
% dimensional color space.
%
% This implements the equivalent of:
% fuzz < sqrt(color_distance^2 * u.a*v.a + alpha_distance^2)
%
% Which produces a multi-dimensional cone for that colorspace along the
% transparency vector.
%
% For example for an RGB:
% color_distance^2 = ( (u.r-v.r)^2 + (u.g-v.g)^2 + (u.b-v.b)^2 ) / 3
%
% See https://imagemagick.org/Usage/bugs/fuzz_distance/
%
% Hue colorspace distances need more work. Hue is not a distance, it is an
% angle!
%
% A check that q is in the same color space as p should be made and the
% appropriate mapping made. -- Anthony Thyssen 8 December 2010
%
% The format of the IsFuzzyEquivalencePixelInfo method is:
%
% MagickBooleanType IsFuzzyEquivalencePixelInfo(const PixelInfo *p,
% const PixelInfo *q)
%
% A description of each parameter follows:
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType IsFuzzyEquivalencePixelInfo(const PixelInfo *p,
  const PixelInfo *q)
{
  double
    cone_scale,
    delta,
    metric,
    threshold;

  /*
    Use the larger of the two fuzz settings, never below MagickSQ1_2 so a
    zero fuzz still tolerates rounding; work with the square throughout.
  */
  threshold=(double) MagickMax(MagickMax(p->fuzz,q->fuzz),(MagickRealType)
    MagickSQ1_2);
  threshold*=threshold;
  cone_scale=1.0;
  metric=0.0;
  if ((p->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /*
        Transparencies are involved - compare alpha first; a pixel with no
        alpha trait counts as fully opaque.
      */
      delta=(p->alpha_trait != UndefinedPixelTrait ? p->alpha : OpaqueAlpha)-
        (q->alpha_trait != UndefinedPixelTrait ? q->alpha : OpaqueAlpha);
      metric=delta*delta;
      if (metric > threshold)
        return(MagickFalse);
      /*
        4D cone on the colorspace: if either color is (nearly) transparent,
        the color component of the distance vanishes.
      */
      if (p->alpha_trait != UndefinedPixelTrait)
        cone_scale=(QuantumScale*p->alpha);
      if (q->alpha_trait != UndefinedPixelTrait)
        cone_scale*=(QuantumScale*q->alpha);
      if (cone_scale <= MagickEpsilon)
        return(MagickTrue);
    }
  if (p->colorspace == CMYKColorspace)
    {
      /*
        CMYK: a CMY cube with an additional cone toward black.
      */
      delta=p->black-q->black;
      metric+=delta*delta*cone_scale;
      if (metric > threshold)
        return(MagickFalse);
      cone_scale*=(double) (QuantumScale*(QuantumRange-p->black));
      cone_scale*=(double) (QuantumScale*(QuantumRange-q->black));
    }
  /*
    RGB or CMY color cube; rescale so per-channel fuzz matches the metric.
  */
  metric*=3.0;
  threshold*=3.0;
  delta=p->red-q->red;
  if (IsHueCompatibleColorspace(p->colorspace) != MagickFalse)
    {
      /*
        Arc distance for hue: fold the difference so it wraps around the
        color circle (hue is an angle, not a length).
      */
      if (fabs((double) delta) > (QuantumRange/2))
        delta-=QuantumRange;
      delta*=2.0;
    }
  metric+=delta*delta*cone_scale;
  if (metric > threshold)
    return(MagickFalse);
  delta=p->green-q->green;
  metric+=delta*delta*cone_scale;
  if (metric > threshold)
    return(MagickFalse);
  delta=p->blue-q->blue;
  metric+=delta*delta*cone_scale;
  if (metric > threshold)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelChannelMask() sets the pixel channel map from the specified channel
% mask.
%
% The format of the SetPixelChannelMask method is:
%
% ChannelType SetPixelChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
/*
  Log one line per pixel channel: its index, a human-readable name derived
  from the channel kind and the image colorspace, and its traits.
*/
static void LogPixelChannels(const Image *image)
{
  ssize_t
    i;

  (void) LogMagickEvent(PixelEvent,GetMagickModule(),"%s[%08x]",
    image->filename,image->channel_mask);
  for (i=0; i < (ssize_t) image->number_channels; i++)
  {
    char
      channel_name[MagickPathExtent],
      traits[MagickPathExtent];

    const char
      *name;

    PixelChannel
      channel;

    channel=GetPixelChannelChannel(image,i);
    switch (channel)
    {
      case RedPixelChannel:
      {
        /* the "red" slot doubles as cyan (CMYK) or gray (grayscale) */
        name="red";
        if (image->colorspace == CMYKColorspace)
          name="cyan";
        if ((image->colorspace == LinearGRAYColorspace) ||
            (image->colorspace == GRAYColorspace))
          name="gray";
        break;
      }
      case GreenPixelChannel:
      {
        name="green";
        if (image->colorspace == CMYKColorspace)
          name="magenta";
        break;
      }
      case BluePixelChannel:
      {
        name="blue";
        if (image->colorspace == CMYKColorspace)
          name="yellow";
        break;
      }
      case BlackPixelChannel:
      {
        name="black";
        if (image->storage_class == PseudoClass)
          name="index";
        break;
      }
      case IndexPixelChannel:
      {
        name="index";
        break;
      }
      case AlphaPixelChannel:
      {
        name="alpha";
        break;
      }
      case ReadMaskPixelChannel:
      {
        name="read-mask";
        break;
      }
      case WriteMaskPixelChannel:
      {
        name="write-mask";
        break;
      }
      case CompositeMaskPixelChannel:
      {
        name="composite-mask";
        break;
      }
      case MetaPixelChannel:
      {
        name="meta";
        break;
      }
      default:
        name="undefined";
    }
    if (image->colorspace == UndefinedColorspace)
      {
        /* no colorspace context: fall back to the numeric channel id */
        (void) FormatLocaleString(channel_name,MagickPathExtent,"%.20g",
          (double) channel);
        name=(const char *) channel_name;
      }
    /* build a comma-separated trait list, then strip the trailing comma */
    *traits='\0';
    if ((GetPixelChannelTraits(image,channel) & UpdatePixelTrait) != 0)
      (void) ConcatenateMagickString(traits,"update,",MagickPathExtent);
    if ((GetPixelChannelTraits(image,channel) & BlendPixelTrait) != 0)
      (void) ConcatenateMagickString(traits,"blend,",MagickPathExtent);
    if ((GetPixelChannelTraits(image,channel) & CopyPixelTrait) != 0)
      (void) ConcatenateMagickString(traits,"copy,",MagickPathExtent);
    if (*traits == '\0')
      (void) ConcatenateMagickString(traits,"undefined,",MagickPathExtent);
    traits[strlen(traits)-1]='\0';
    (void) LogMagickEvent(PixelEvent,GetMagickModule(),"  %.20g: %s (%s)",
      (double) i,name,traits);
  }
}
MagickExport ChannelType SetPixelChannelMask(Image *image,
  const ChannelType channel_mask)
{
#define GetChannelBit(mask,bit)  (((size_t) (mask) >> (size_t) (bit)) & 0x01)

  ChannelType
    mask;  /* previous mask, returned to the caller */

  ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(PixelEvent,GetMagickModule(),"%s[%08x]",
      image->filename,channel_mask);
  mask=image->channel_mask;
  image->channel_mask=channel_mask;
  /*
    Recompute the traits of every channel from the new mask: channels
    excluded by the mask are copied untouched; included channels are
    updated (and blended with alpha when the image has an alpha channel).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    if (GetChannelBit(channel_mask,channel) == 0)
      {
        SetPixelChannelTraits(image,channel,CopyPixelTrait);
        continue;
      }
    if (channel == AlphaPixelChannel)
      {
        /* an alpha channel flagged copy-on-write stays copy-only */
        if ((image->alpha_trait & CopyPixelTrait) != 0)
          {
            SetPixelChannelTraits(image,channel,CopyPixelTrait);
            continue;
          }
        SetPixelChannelTraits(image,channel,UpdatePixelTrait);
        continue;
      }
    if (image->alpha_trait != UndefinedPixelTrait)
      {
        SetPixelChannelTraits(image,channel,(const PixelTrait)
          (UpdatePixelTrait | BlendPixelTrait));
        continue;
      }
    SetPixelChannelTraits(image,channel,UpdatePixelTrait);
  }
  /*
    Bookkeeping channels are never updated by image operators.
  */
  if (image->storage_class == PseudoClass)
    SetPixelChannelTraits(image,IndexPixelChannel,CopyPixelTrait);
  if ((image->channels & ReadMaskChannel) != 0)
    SetPixelChannelTraits(image,ReadMaskPixelChannel,CopyPixelTrait);
  if ((image->channels & WriteMaskChannel) != 0)
    SetPixelChannelTraits(image,WriteMaskPixelChannel,CopyPixelTrait);
  if ((image->channels & CompositeMaskChannel) != 0)
    SetPixelChannelTraits(image,CompositeMaskPixelChannel,CopyPixelTrait);
  if ((GetLogEventMask() & PixelEvent) != 0)
    LogPixelChannels(image);
  return(mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l M e t a C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelMetaChannels() sets the image meta channels.
%
% The format of the SetPixelMetaChannels method is:
%
% MagickBooleanType SetPixelMetaChannels(Image *image,
% const size_t number_meta_channels,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_meta_channels: the number of meta channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetPixelMetaChannels(Image *image,
  const size_t number_meta_channels,ExceptionInfo *exception)
{
  /* validate arguments like every other exported method in this module */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Clamp the request so the meta channels never push the total channel
    count past MaxPixelChannels once the standard channels are accounted
    for.
  */
  image->number_meta_channels=MagickMin(number_meta_channels,MaxPixelChannels
    -(size_t) MetaPixelChannels);
  InitializePixelChannelMap(image);
  /* resynchronize the pixel cache to the new channel layout */
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o r t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortImagePixels() sorts pixels within each scanline in ascending order of
% intensity.
%
% The format of the SortImagePixels method is:
%
% MagickBooleanType SortImagePixels(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SortImagePixels(Image *image,
  ExceptionInfo *exception)
{
  /* fixed: the tag previously read "Solarize/Image", a copy-paste from
     SolarizeImage that mislabeled progress callbacks for this method */
#define SortImageTag  "Sort/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Sort image pixels within each scanline in ascending order of intensity,
    swapping whole pixels (all channels) so colors stay intact.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Bubble sort the scanline by pixel intensity; each outer pass floats
      the largest remaining pixel to the end of the unsorted prefix.
    */
    for (x=0; x < (ssize_t) image->columns-1; x++)
    {
      MagickRealType
        current,
        previous;

      ssize_t
        j;

      previous=GetPixelIntensity(image,q);
      for (j=0; j < (ssize_t) (image->columns-x-1); j++)
      {
        current=GetPixelIntensity(image,q+(j+1)*GetPixelChannels(image));
        if (previous > current)
          {
            Quantum
              pixel[MaxPixelChannels];

            /*
              Swap adjacent pixels (all channels at once).
            */
            (void) memcpy(pixel,q+j*GetPixelChannels(image),
              GetPixelChannels(image)*sizeof(Quantum));
            (void) memcpy(q+j*GetPixelChannels(image),q+(j+1)*
              GetPixelChannels(image),GetPixelChannels(image)*sizeof(Quantum));
            (void) memcpy(q+(j+1)*GetPixelChannels(image),pixel,
              GetPixelChannels(image)*sizeof(Quantum));
          }
        else
          previous=current;  /* larger value carried forward unchanged */
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SortImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
OMPElementWiseVectorAssembler.h | /**
* Copyright (c) 2012, OpenGeoSys Community (http://www.opengeosys.com)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.com/LICENSE.txt
*
*
* \file OMPElementWiseVectorAssembler.h
*
* Created on 2012-08-20 by Norihiro Watanabe
*/
#pragma once
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "MeshLib/Core/IMesh.h"
#include "MathLib/DataType.h"
#include "DiscreteLib/Core/IDiscreteVectorAssembler.h"
namespace MeshLib
{
class IMesh;
}
namespace DiscreteLib
{
/**
* \brief Element-based discrete vector assembler classes
*/
/**
 * \brief Element-wise discrete vector assembler parallelized with OpenMP.
 *
 * \tparam T_VALUE   Scalar type stored in the global vector.
 * \tparam T_UPDATER Element-local updater; must be copy-constructible
 *                   (one copy is made per thread during assembly) and
 *                   provide update(IElement&, GlobalVectorType&).
 */
template <class T_VALUE, class T_UPDATER>
class OMPElementWiseVectorAssembler : public IDiscreteVectorAssembler<T_VALUE>
{
public:
    typedef typename IDiscreteVectorAssembler<T_VALUE>::VectorType GlobalVectorType;
    typedef T_UPDATER UpdaterType;

    /// \param a Element updater used as the prototype for per-thread copies.
    ///          Not owned by this class — presumably the caller keeps it
    ///          alive for the assembler's lifetime; TODO confirm ownership.
    explicit OMPElementWiseVectorAssembler(UpdaterType* a) : _e_assembler(a) {};

    virtual ~OMPElementWiseVectorAssembler() {};

    /// Conduct the element by element assembly procedure.
    ///
    /// @param msh       Mesh whose elements are visited
    /// @param globalVec Discrete vector receiving the assembled entries
    void assembly(const MeshLib::IMesh &msh, GlobalVectorType &globalVec);

private:
    UpdaterType* _e_assembler;  ///< prototype updater, copied per thread
};
/// Conduct the element by element assembly procedure.
///
/// Each OpenMP thread works on its own copy of the updater (firstprivate)
/// so element-local scratch state cannot race; writes into globalVec are
/// assumed element-disjoint — TODO confirm the updater never touches
/// entries of other elements.
template <class T1, class T2>
void OMPElementWiseVectorAssembler<T1,T2>::assembly(const MeshLib::IMesh &msh, GlobalVectorType &globalVec)
{
    const size_t n_ele = msh.getNumberOfElements();
    UpdaterType assembler(*_e_assembler);
#ifdef _OPENMP
    // n_ele must be listed explicitly: since OpenMP 4.0 (e.g. GCC 9+) const
    // locals are no longer predetermined shared, so default(none) without it
    // fails to compile.  Clause separators are spaces, not commas.
#pragma omp parallel for default(none) shared(msh, globalVec, n_ele) firstprivate(assembler)
#endif
    for (size_t i=0; i<n_ele; i++) {
        MeshLib::IElement *e = msh.getElement(i);
        assembler.update(*e, globalVec);
    }
}
}
|
matfuncs.c | #include "matrix.h"
/** \brief Computes the sum of two values
 *
 * \param[in] x first operand
 * \param[in] y second operand
 * \return \f$ x+y \f$
 *
 */
mtype __mat_addfunc(mtype x, mtype y)
{
    mtype sum = x + y;
    return sum;
}
/** \brief Computes the difference of two values
 *
 * \param[in] x minuend
 * \param[in] y subtrahend
 * \return \f$ x-y \f$
 *
 */
mtype __mat_subfunc(mtype x, mtype y)
{
    mtype diff = x - y;
    return diff;
}
/** \brief Computes the product of two values
 *
 * \param[in] x first factor
 * \param[in] y second factor
 * \return \f$ xy \f$
 *
 */
mtype __mat_mulfunc(mtype x, mtype y)
{
    mtype prod = x * y;
    return prod;
}
/** \brief Computes the quotient of two values
 *
 * \param[in] x dividend
 * \param[in] y divisor (no zero check is performed; behavior for y == 0
 *              follows the underlying floating-point division)
 * \return \f$ \frac{x}{y} \f$
 *
 */
mtype __mat_divfunc(mtype x, mtype y)
{
    mtype quot = x / y;
    return quot;
}
/** \brief Computes the square of a value
 *
 * \param[in] x
 * \return \f$ x^{2} \f$
 *
 */
mtype __mat_sqrfunc(mtype x)
{
    mtype sq = x * x;
    return sq;
}
/** \brief Computes the square root of a value
 *
 * \param[in] x
 * \return \f$ \sqrt{x} \f$
 *
 */
mtype __mat_sqrtfunc(mtype x)
{
    /* sqrt() operates in double; the result is narrowed to mtype on return */
    return (mtype)sqrt(x);
}
/** \brief Computes Huber weight function
 *
 * \param[in] x residual
 * \param[in] k tuning constant
 * \return \f$ \begin{cases} 1, & \text{for } |x| \le k, \\ \frac{k}{|x|}, & \text{otherwise.}\end{cases} \f$
 *
 */
mtype __mat_huber_wt(mtype x, mtype k)
{
    /* evaluate |x| once instead of twice (the original called fabs in both
       the test and the quotient) */
    const double ax = fabs(x);
    if (ax <= k) return 1.0;
    else return (mtype)(k/ax);
}
/** \brief Computes bisquare (Tukey biweight) weight function
 *
 * \param[in] x residual
 * \param[in] k tuning constant
 * \return \f$ \begin{cases} \left ( 1-\left ( \frac{x}{k} \right )^{2} \right )^{2}, & \text{for } |x| \le k, \\ 0, & \text{otherwise.}\end{cases} \f$
 *
 */
mtype __mat_bisquare_wt(mtype x, mtype k)
{
    if (fabs(x) <= k)
    {
        mtype u = x / k;
        u = 1 - (u * u);
        return u * u;
    }
    return 0.0;
}
/** \cond HIDDEN_SYMBOLS */
/* file-local mirror of __mat_huber_wt, kept inline for hot loops in this
   translation unit */
static __inline mtype __huber_wt(mtype x, mtype k)
{
    if (fabs(x) <= k)
        return 1.0;
    return (mtype)(k/fabs(x));
}
/* File-local copy of the bisquare weight so the OpenMP loops below can inline it. */
static __inline mtype __bisquare_wt(mtype x, mtype k)
{
    mtype u;
    if (fabs(x) > k)
        return 0.0;
    u = x / k;
    u = 1 - (u * u);
    return (u * u);
}
/** \endcond */
/** \brief Computes inverse hyperbolic sine function
 *
 * \param[in] x
 * \return \f$ \sinh^{-1}\left(x\right) \f$
 *
 */
mtype __mat_arcsinh(mtype x)
{
    mtype y;
    if (fabs(x) > 1.0e10)
        /* Asymptotic form: asinh(x) ~ sgn(x)*(ln 2 + ln|x|) for large |x|.
         * BUG FIX: the old code returned ln|x| - ln 2 for negative x (wrong
         * sign and magnitude); the whole expression must be negated. */
        return (mtype)((x > 0.0) ? 0.69314718055995+log(fabs(x)) : -(0.69314718055995+log(fabs(x))));
    else
    {
        /* Stable mid-range form:
         * asinh(x) = sgn(x) * log1p(|x| + x^2 / (1 + sqrt(1 + x^2))). */
        y=x*x;
        /* NOTE(review): intermediates are forced to float even when mtype is
         * double (precision loss) — confirm whether this is intentional. */
        return (mtype)((x == 0.0f) ? 0.0f : ((x > 0.0f) ?
                       __mat_logplusone((float)(fabs(x)+y/(1.0f+sqrt(1.0f+y)))) :
                       -__mat_logplusone((float)(fabs(x)+y/(1.0f+sqrt(1.0f+y))))));
    }
}
/** \brief Computes inverse hyperbolic cosine function
*
* \param[in] x
* \return \f$ \cosh^{-1}\left(x\right) \f$
*
*/
mtype __mat_arccosh(mtype x)
{
    /* Domain is x >= 1; smaller inputs are clamped to 0. */
    if (x <= 1.0)
        return 0.0;
    /* For very large x, acosh(x) ~ ln 2 + ln x avoids overflow in x*x. */
    if (x > 1.0e10)
        return (mtype)(0.69314718055995 + log(x));
    /* General case: acosh(x) = ln(x + sqrt((x-1)(x+1))). */
    return (mtype)log(x + sqrt((x - 1.0) * (x + 1.0)));
}
/** \brief Computes inverse hyperbolic tangent function
*
* \param[in] x
* \return \f$ \tanh^{-1}\left(x\right) \f$
*
*/
mtype __mat_arctanh(mtype x)
{
mtype ax;
/* atanh diverges at |x| = 1; saturate to +-DOUBLE_MAX outside (-1, 1). */
if (fabs(x) >= 1.0)return (mtype)((x > 0.0) ? DOUBLE_MAX : -DOUBLE_MAX);
else
{
ax=(mtype)fabs(x);
/* atanh(x) = 0.5*log1p(2|x|/(1-|x|)) with the sign of x restored.
 * NOTE(review): the inner arithmetic uses float literals (2.0f, 1.0f), so
 * precision is limited to float even when mtype is double — confirm. */
return (mtype)((x == 0.0) ? 0.0 : ((x > 0.0) ? 0.5*__mat_logplusone(2.0f*ax/(1.0f-ax)) :
-0.5*__mat_logplusone(2.0f*ax/(1.0f-ax))));
}
}
/** \brief Computes logarithm plus one function
*
* \param[in] x
* \return \f$ \log\left(1+x\right) \f$
*
*/
mtype __mat_logplusone(mtype x)
{
mtype y,z;
if(x==0.0) return 0.0;
/* Outside roughly (1/sqrt(2)-1, sqrt(2)-1) the naive log(1+x) is already
 * well-conditioned, so fall back to libm. */
else if(x<-0.2928 || x>0.4142) return (mtype)log(1.0f+x);
else
{
/* Near zero, evaluate via the substitution z = x/(x+2):
 * log(1+x) = 2*atanh(z), expanded as an odd polynomial in z with
 * precomputed coefficients (approximations of 2/3, 2/5, 2/7, ...). */
z=x/(x+2.0f);
y=z*z;
return (mtype)(z*(2.0+y*(0.66666666663366+y*(0.400000001206045+y*(0.285714091590488+y*(0.22223823332791+y*(0.1811136267967+y*0.16948212488)))))));
}
}
/** \brief Rounds a number away from zero (towards +-infinity)
 *
 * \param[in] x Input value
 * \return \f$ \textrm{sgn}(x) \left\lceil \left| x \right| \right\rceil \f$
 *
 * \note The previous doc formula, \f$ \textrm{sgn}(x)\lfloor |x|+0.5 \rfloor \f$,
 *       described round-half-away-from-zero; the code actually ceilings the
 *       magnitude (e.g. 1.2 -> 2, -1.2 -> -2).
 */
mtype __mat_round_away_zero(mtype x)
{
if(x>0) return ceil(x);
else return floor(x);
}
/** \brief Rounds a number towards zero (truncation)
 *
 * \param[in] x Input value
 * \return \f$ \textrm{sgn}(x) \left\lfloor \left| x \right| \right\rfloor \f$
 *
 * \note The previous doc formula, \f$ \textrm{sgn}(x)\lceil |x|-0.5 \rceil \f$,
 *       described round-half-towards-zero; the code actually truncates
 *       (e.g. 1.8 -> 1, -1.8 -> -1).
 */
mtype __mat_round_towards_zero(mtype x)
{
if(x>0) return floor(x);
else return ceil(x);
}
/** \brief Computes Huber weight function element-wise on a matrix
 *
 * \param[in] A Input matrix
 * \param[in] k Huber tuning constant
 * \param[in] sigma Scale estimate; each element is normalised as \f$ a_{ij}/\sigma \f$
 * \param[in] result Optional preallocated output matrix (allocated when NULL)
 * \return \f$ \mathbf{B},\, b_{ij}=f_k\left(a_{ij}/\sigma\right) \f$ where \f$ f_k \f$ is the Huber weight function
 *
 */
MATRIX mat_huber_wt(MATRIX A, mtype k, mtype sigma, MATRIX result)
{
    int i, j, m, n;
    m = MatCol(A);
    n = MatRow(A);
    /* Braces added: the original unbraced nested if (with a stray ";;") was a
     * dangling-else hazard waiting to happen. */
    if(result==NULL)
    {
        if((result = mat_creat(n, m, UNDEFINED))==NULL)
            return mat_error(MAT_MALLOC);
    }
    /* Rows are independent; parallelise the outer loop, keeping j private. */
    #pragma omp parallel for private(j)
    for(i=0; i<n; ++i)
    {
        for(j=0; j<m; ++j)
        {
            result[i][j] = __huber_wt(A[i][j]/sigma, k);
        }
    }
    return(result);
}
/** \brief Computes bisquare weight function element-wise on a matrix
 *
 * \param[in] A Input matrix
 * \param[in] k Bisquare tuning constant
 * \param[in] sigma Scale estimate; each element is normalised as \f$ a_{ij}/\sigma \f$
 * \param[in] result Optional preallocated output matrix (allocated when NULL)
 * \return \f$ \mathbf{B},\, b_{ij}=f_k\left(a_{ij}/\sigma\right) \f$ where \f$ f_k \f$ is the bisquare weight function
 *
 */
MATRIX mat_bisquare_wt(MATRIX A, mtype k, mtype sigma, MATRIX result)
{
    int r, c, cols, rows;
    cols = MatCol(A);
    rows = MatRow(A);
    if(result==NULL)
    {
        if((result = mat_creat(rows, cols, UNDEFINED))==NULL)
            return mat_error(MAT_MALLOC);
    }
    /* Rows are independent; parallelise the outer loop. */
    #pragma omp parallel for private(c)
    for(r=0; r<rows; ++r)
    {
        for(c=0; c<cols; ++c)
        {
            result[r][c] = __bisquare_wt(A[r][c]/sigma, k);
        }
    }
    return(result);
}
/** \brief Computes a given function element-wise on a matrix
 *
 * \param[in] A Input matrix
 * \param[in] pt2func Scalar function applied to every element
 * \param[in] result Optional preallocated output matrix (allocated when NULL
 *            or when aliased to A)
 * \return \f$ \mathbf{B},\, b_{ij}=f\left(a_{ij}\right) \f$
 *
 */
MATRIX mat_gfunc(MATRIX A, mtype (*pt2func)(mtype), MATRIX result)
{
    int r, c, cols, rows;
    cols = MatCol(A);
    rows = MatRow(A);
    /* Allocate a fresh output when none was supplied, or when the caller
     * aliased it to the input matrix. */
    if(result==NULL || result==A)
    {
        if((result = mat_creat(rows, cols, UNDEFINED))==NULL)
            return mat_error(MAT_MALLOC);
    }
    #pragma omp parallel for private(c)
    for(r=0; r<rows; ++r)
    {
        for(c=0; c<cols; ++c)
        {
            result[r][c] = pt2func(A[r][c]);
        }
    }
    return(result);
}
|
omp_copyin.c |
#include "omp_testsuite.h"
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
/*
static int sum0 = 0;
#pragma omp threadprivate(sum0)
static int myvalue = 0;
#pragma omp threadprivate(myvalue)
*/
/* Threadprivate accumulator: each thread owns a private copy; copyin
 * broadcasts the master's value at parallel-region entry. */
static int sum1 = 789;
#pragma omp threadprivate(sum1)
/* Validation-suite test for the copyin clause: returns 1 (pass) when the
 * threads' copyin-initialised partial sums add up to sum(1..999).
 * logFile is unused; kept for the suite's uniform test signature. */
int
check_omp_copyin (FILE * logFile)
{
int sum = 0;
int known_sum;
int i;
/* Reset the master's copy; copyin must propagate this 0 (not the static
 * initialiser 789) into every thread's private sum1. */
sum1 = 0;
#pragma omp parallel copyin(sum1)
{
/*printf("sum1=%d\n",sum1); */
#pragma omp for
for (i = 1; i < 1000; i++)
{
sum1 = sum1 + i;
} /*end of for */
/* Combine the per-thread partial sums under mutual exclusion. */
#pragma omp critical
{
sum = sum + sum1;
} /*end of critical */
} /* end of parallel */
known_sum = (999 * 1000) / 2;
return (known_sum == sum);
} /* end of check_threadprivate */
static int crosssum1 = 789;
#pragma omp threadprivate(crosssum1)
/* Cross-check variant: identical to check_omp_copyin but deliberately OMITS
 * the copyin clause, so non-master threads keep their stale private value
 * (789) and the total should come out wrong.  The suite uses this to verify
 * that the positive test can actually detect a missing copyin. */
int
crosscheck_omp_copyin (FILE * logFile)
{
int sum = 0;
int known_sum;
int i;
/* Only the master's private copy is reset here. */
crosssum1 = 0;
#pragma omp parallel
{
/*printf("sum1=%d\n",sum1); */
#pragma omp for
for (i = 1; i < 1000; i++)
{
crosssum1 = crosssum1 + i;
} /*end of for */
#pragma omp critical
{
sum = sum + crosssum1;
} /*end of critical */
} /* end of parallel */
known_sum = (999 * 1000) / 2;
return (known_sum == sum);
} /* end of check_threadprivate */
/* Deliberately NOT marked threadprivate: this cross-check expects the shared
 * variable to be clobbered across threads, making the comparison fail. */
static int myvalue2 = 0;
int
crosscheck_spmd_threadprivate (FILE * logFile)
{
int iter;
int *data;
int size;
int failed = 0;
int my_random;
/* Pin the thread count so both parallel regions see the same team size. */
omp_set_dynamic (0);
#pragma omp parallel
{
#pragma omp master
{
/* Master sizes and allocates the shared scratch array; the implicit
 * barrier at the end of the region publishes it to later regions.
 * NOTE(review): malloc result is not checked. */
size = omp_get_num_threads ();
data = (int *) malloc (size * sizeof (int));
}
}
srand (45);
for (iter = 0; iter < 100; iter++)
{
my_random = rand ();
#pragma omp parallel
{
int rank;
rank = omp_get_thread_num ();
/* Every thread writes the shared myvalue2 — a race by design. */
myvalue2 = data[rank] = my_random + rank;
}
#pragma omp parallel reduction(+:failed)
{
int rank;
rank = omp_get_thread_num ();
/* With >1 thread, myvalue2 holds only one thread's value, so some
 * comparisons should fail — which is what this cross-check wants. */
failed = failed + (myvalue2 != data[rank]);
}
}
free (data);
return !failed;
}
|
GB_binop__minus_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fp32)
// A*D function (colscale): GB (_AxD__minus_fp32)
// D*A function (rowscale): GB (_DxB__minus_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fp32)
// C=scalar+B GB (_bind1st__minus_fp32)
// C=scalar+B' GB (_bind1st_tran__minus_fp32)
// C=A+scalar GB (_bind2nd__minus_fp32)
// C=A'+scalar GB (_bind2nd_tran__minus_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fixed: the stray trailing '\' after the 0 spliced the following comment
// line into the macro body)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (fixed: same stray trailing '\' as GB_A_IS_PATTERN)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_FP32 || GxB_NO_MINUS_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense; the shared template does the work,
// specialised here via GB_BINOP to z = (x - y) on float.
void GB (_Cdense_ewise3_accum__minus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense, no accumulation; same template
// pattern as the accum variant above.
void GB (_Cdense_ewise3_noaccum__minus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, sliced by
// the precomputed B_ek_slicing tasks.  Returns GrB_NO_VALUE when this
// operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__minus_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__minus_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns first.
// Harmless generator artifact; left as-is (file is auto-generated).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the corresponding diagonal entry of D,
// writing directly into C's value array.
GrB_Info GB (_AxD__minus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the corresponding diagonal entry of D.
GrB_Info GB (_DxB__minus_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B with optional mask M (structural/complemented).
// For eWiseUnion, alpha/beta scalars substitute for entries missing from A/B.
GrB_Info GB (_AaddB__minus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
// the scalars are only read in the eWiseUnion case
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) when C is sparse/hyper.
GrB_Info GB (_AemultB_08__minus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A sparse/hyper, B bitmap/full.  GB_BINOP_FLIP is 0 for
// MINUS (a flipped rminus variant exists), so flipxy needs no handling here.
GrB_Info GB (_AemultB_02__minus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__minus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C, with optional (possibly complemented) mask.
GrB_Info GB (_AemultB_bitmap__minus_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x - Bx [p] for every entry present in B (Bb is B's bitmap, or
// NULL when B is full).  Cx and Bx may alias; the loop is embarrassingly
// parallel over the bnz entries.
GrB_Info GB (_bind1st__minus_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] - y for every entry present in A; mirror image of bind1st.
GrB_Info GB (_bind2nd__minus_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A while applying cij = x - aij via GB_CAST_OP.
GrB_Info GB (_bind1st_tran__minus_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generator artifact: GB_ATYPE is redefined (to the same thing) on the way
// out so later code sees a consistent definition.
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A while applying cij = aij - y via GB_CAST_OP.
GrB_Info GB (_bind2nd_tran__minus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
region_layer.c | #include "region_layer.h"
#include "activations.h"
#include "blas.h"
#include "box.h"
#include "opencl.h"
#include "utils.h"
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
/* Builds a YOLOv2-style region layer over a w x h grid with n anchors per
 * cell; each anchor predicts `coords` box values, 1 objectness score and
 * `classes` class scores.  Returns the fully initialised layer by value.
 * NOTE(review): calloc results are never checked — allocation is assumed
 * to succeed. */
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
layer l = {0};
l.type = REGION;
l.n = n;
l.batch = batch;
l.h = h;
l.w = w;
/* One channel group per anchor: coords + objectness + class scores. */
l.c = n*(classes + coords + 1);
l.out_w = l.w;
l.out_h = l.h;
l.out_c = l.c;
l.classes = classes;
l.coords = coords;
l.cost = calloc(1, sizeof(float));
l.biases = calloc(n*2, sizeof(float));
l.bias_updates = calloc(n*2, sizeof(float));
l.outputs = h*w*n*(classes + coords + 1);
l.inputs = l.outputs;
/* Truth buffer holds up to 30 ground-truth boxes per image. */
l.truths = 30*(l.coords + 1);
l.delta = calloc(batch*l.outputs, sizeof(float));
l.output = calloc(batch*l.outputs, sizeof(float));
int i;
/* Default anchor priors of 0.5 until real biases are loaded from the cfg. */
for(i = 0; i < n*2; ++i){
l.biases[i] = .5;
}
l.forward = forward_region_layer;
l.backward = backward_region_layer;
#ifdef GPU
if (gpu_index >= 0) {
l.forward_gpu = forward_region_layer_gpu;
l.backward_gpu = backward_region_layer_gpu;
l.output_gpu = opencl_make_array(l.output, batch*l.outputs);
l.delta_gpu = opencl_make_array(l.delta, batch*l.outputs);
}
#endif
fprintf(stderr, "detection\n");
/* NOTE(review): reseeds the global PRNG with a fixed seed — presumably for
 * reproducibility, but it affects every later rand() caller; confirm. */
srand(0);
return l;
}
/* Resizes a region layer in place to a new w x h grid, reallocating the
 * per-batch output/delta buffers (and rebuilding the GPU mirrors when built
 * with GPU support). */
void resize_region_layer(layer *l, int w, int h)
{
    l->w = w;
    l->h = h;
#ifdef GPU
    if (gpu_index >= 0) {
        opencl_free_gpu_only(l->delta_gpu);
        opencl_free_gpu_only(l->output_gpu);
    }
#endif
    l->outputs = h*w*l->n*(l->classes + l->coords + 1);
    l->inputs = l->outputs;
    /* Reallocate via temporaries: the old `p = realloc(p, ...)` pattern leaks
     * the original buffer and stores NULL on failure, crashing later.  The
     * layer cannot run without these buffers, so fail fast instead. */
    float *output = (float *) realloc(l->output, l->batch*l->outputs*sizeof(float));
    float *delta = (float *) realloc(l->delta, l->batch*l->outputs*sizeof(float));
    if (!output || !delta) {
        fprintf(stderr, "resize_region_layer: out of memory\n");
        exit(EXIT_FAILURE);
    }
    l->output = output;
    l->delta = delta;
#ifdef GPU
    if (gpu_index >= 0) {
        l->delta_gpu = opencl_make_array(l->delta, l->batch*l->outputs);
        l->output_gpu = opencl_make_array(l->output, l->batch*l->outputs);
    }
#endif
}
/* Decodes anchor n's raw network outputs at cell (i,j) into a box in
 * normalised [0,1] grid coordinates.  `stride` is the plane size (w*h)
 * separating the x/y/w/h channels inside the output tensor. */
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
    float tx = x[index + 0*stride];
    float ty = x[index + 1*stride];
    float tw = x[index + 2*stride];
    float th = x[index + 3*stride];
    box decoded;
    decoded.x = (i + tx) / w;
    decoded.y = (j + ty) / h;
    decoded.w = exp(tw) * biases[2*n] / w;
    decoded.h = exp(th) * biases[2*n+1] / h;
    return decoded;
}
/* YOLOv4-style box decode: applies the logistic activation to the raw x/y
 * offsets before normalising by the grid size. */
box get_region_box_y4(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
    box b;
    b.x = (i + logistic_activate(x[index + 0])) / w;
    b.y = (j + logistic_activate(x[index + 1])) / h;
    /* The original first assigned unnormalised w/h and then unconditionally
     * overwrote them inside an `if(1)` block — dead stores removed; only the
     * normalised form ever reached the caller. */
    b.w = exp(x[index + 2]) * biases[2*n] / w;
    b.h = exp(x[index + 3]) * biases[2*n+1] / h;
    return b;
}
/* Writes the box-regression gradient for one anchor into `delta` and returns
 * the IoU between the current prediction and the ground truth. */
float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale, int stride)
{
    box pred = get_region_box(x, biases, n, index, i, j, w, h, stride);
    float iou = box_iou(pred, truth);
    /* Targets expressed in the network's raw output space. */
    float t[4];
    t[0] = truth.x*w - i;
    t[1] = truth.y*h - j;
    t[2] = log(truth.w*w / biases[2*n]);
    t[3] = log(truth.h*h / biases[2*n + 1]);
    int c;
    for(c = 0; c < 4; ++c){
        delta[index + c*stride] = scale * (t[c] - x[index + c*stride]);
    }
    return iou;
}
/* Gradient for the n mask coefficients of one anchor: scale*(truth - pred).
 * NOTE(review): `scale` is declared int but the caller passes the float
 * l.mask_scale, so fractional scales are silently truncated — looks
 * unintentional; confirm before changing the signature. */
void delta_region_mask(float *truth, float *x, int n, int index, float *delta, int stride, int scale)
{
int i;
for(i = 0; i < n; ++i){
delta[index + i*stride] = scale*(truth[i] - x[index + i*stride]);
}
}
/* Classification gradient for one anchor.  With a softmax tree (`hier`),
 * walks from the target class up to the root, pushing each sibling group
 * towards a one-hot distribution; otherwise writes a flat one-vs-all
 * gradient over `classes`.  avg_cat accumulates the predicted probability
 * of the true class for logging. */
void delta_region_class(float *output, float *delta, int index, int class, int classes, tree *hier, float scale, int stride, float *avg_cat, int tag)
{
int i, n;
if(hier){
float pred = 1;
/* Multiply conditional probabilities along the path to the root. */
while(class >= 0){
pred *= output[index + stride*class];
int g = hier->group[class];
int offset = hier->group_offset[g];
/* Zero-target gradient for every node in this sibling group... */
for(i = 0; i < hier->group_size[g]; ++i){
delta[index + stride*(offset + i)] = scale * (0 - output[index + stride*(offset + i)]);
}
/* ...then overwrite the true node's entry with a one-target gradient. */
delta[index + stride*class] = scale * (1 - output[index + stride*class]);
class = hier->parent[class];
}
*avg_cat += pred;
} else {
/* With `tag` set, an already-written delta means only the true class
 * needs updating; skip the full sweep. */
if (delta[index] && tag){
delta[index + stride*class] = scale * (1 - output[index + stride*class]);
return;
}
for(n = 0; n < classes; ++n){
delta[index + stride*n] = scale * (((n == class)?1 : 0) - output[index + stride*n]);
if(n == class) *avg_cat += output[index + stride*n];
}
}
}
float logit(float x)
{
return log(x/(1.-x));
}
/* NaN test returned as a float (1.0 for NaN, 0.0 otherwise): IEEE NaN is
 * the only value that compares unequal to itself. */
float tisnan(float x)
{
    int is_nan = (x != x);
    return (float)is_nan;
}
/* Forward pass of the region layer.  Copies the raw network input into
 * l.output, applies the per-anchor activations (CPU build only), and — when
 * training — fills l.delta with the YOLOv2 loss gradients and l.cost with
 * the squared gradient magnitude. */
void forward_region_layer(const layer l, network net)
{
int i,j,b,t,n;
memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
/* CPU-only activation: logistic on x/y offsets and objectness, and either
 * logistic, flat softmax, or a hierarchical softmax over the class scores. */
if (gpu_index < 0) {
for (b = 0; b < l.batch; ++b){
for(n = 0; n < l.n; ++n){
int index = entry_index(l, b, n*l.w*l.h, 0);
activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
index = entry_index(l, b, n*l.w*l.h, l.coords);
if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);
index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
if(!l.softmax && !l.softmax_tree) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC);
}
}
if (l.softmax_tree){
/* Run an independent softmax per sibling group of the class tree. */
int i;
int count = l.coords + 1;
for (i = 0; i < l.softmax_tree->groups; ++i) {
int group_size = l.softmax_tree->group_size[i];
softmax_cpu(net.input + count, group_size, l.batch, l.inputs, l.n*l.w*l.h, 1, l.n*l.w*l.h, l.temperature, l.output + count);
count += group_size;
}
} else if (l.softmax){
int index = entry_index(l, 0, 0, l.coords + !l.background);
softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
}
}
#endif
/* Gradients start from zero; inference stops here. */
memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
if(!net.train) return;
float avg_iou = 0;
float recall = 0;
float avg_cat = 0;
float avg_obj = 0;
float avg_anyobj = 0;
int count = 0;
int class_count = 0;
*(l.cost) = 0;
for (b = 0; b < l.batch; ++b) {
if(l.softmax_tree){
/* Truth boxes with huge x/y encode classification-only labels: find
 * the anchor with the highest class probability and train only its
 * class scores. */
int onlyclass = 0;
for(t = 0; t < 30; ++t){
box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
if(!truth.x) break;
int class = net.truth[t*(l.coords + 1) + b*l.truths + l.coords];
float maxp = 0;
int maxi = 0;
if(truth.x > 100000 && truth.y > 100000){
for(n = 0; n < l.n*l.w*l.h; ++n){
int class_index = entry_index(l, b, n, l.coords + 1);
int obj_index = entry_index(l, b, n, l.coords);
float scale = l.output[obj_index];
l.delta[obj_index] = l.noobject_scale * (0 - l.output[obj_index]);
float p = scale*get_hierarchy_probability(l.output + class_index, l.softmax_tree, class, l.w*l.h);
if(p > maxp){
maxp = p;
maxi = n;
}
}
int class_index = entry_index(l, b, maxi, l.coords + 1);
int obj_index = entry_index(l, b, maxi, l.coords);
delta_region_class(l.output, l.delta, class_index, class, l.classes, l.softmax_tree, l.class_scale, l.w*l.h, &avg_cat, !l.softmax);
/* NOTE(review): the conditional objectness delta written on the
 * next two lines is immediately clobbered by the unconditional
 * `= 0` below — the conditional looks like dead code; confirm. */
if(l.output[obj_index] < .3) l.delta[obj_index] = l.object_scale * (.3 - l.output[obj_index]);
else l.delta[obj_index] = 0;
l.delta[obj_index] = 0;
++class_count;
onlyclass = 1;
break;
}
}
if(onlyclass) continue;
}
/* Pass 1: every anchor gets a no-object gradient unless it already
 * overlaps some truth box by more than l.thresh. */
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
float best_iou = 0;
for(t = 0; t < 30; ++t){
box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
if(!truth.x) break;
float iou = box_iou(pred, truth);
if (iou > best_iou) {
best_iou = iou;
}
}
int obj_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, l.coords);
avg_anyobj += l.output[obj_index];
l.delta[obj_index] = l.noobject_scale * (0 - l.output[obj_index]);
if(l.background) l.delta[obj_index] = l.noobject_scale * (1 - l.output[obj_index]);
if (best_iou > l.thresh) {
l.delta[obj_index] = 0;
}
/* Warm-up (first 12800 images): pull every anchor towards its
 * prior so the box predictions start well-conditioned. */
if(*(net.seen) < 12800){
box truth = {0};
truth.x = (i + .5)/l.w;
truth.y = (j + .5)/l.h;
truth.w = l.biases[2*n]/l.w;
truth.h = l.biases[2*n+1]/l.h;
delta_region_box(truth, l.output, l.biases, n, box_index, i, j, l.w, l.h, l.delta, .01, l.w*l.h);
}
}
}
}
/* Pass 2: match each truth box to the best-fitting anchor in its cell
 * (by shape alone) and write box/objectness/class gradients for it. */
for(t = 0; t < 30; ++t){
box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
if(!truth.x) break;
float best_iou = 0;
int best_n = 0;
i = (truth.x * l.w);
j = (truth.y * l.h);
box truth_shift = truth;
truth_shift.x = 0;
truth_shift.y = 0;
for(n = 0; n < l.n; ++n){
int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
if(l.bias_match){
pred.w = l.biases[2*n]/l.w;
pred.h = l.biases[2*n+1]/l.h;
}
/* Compare shapes only: both boxes centred at the origin. */
pred.x = 0;
pred.y = 0;
float iou = box_iou(pred, truth_shift);
if (iou > best_iou){
best_iou = iou;
best_n = n;
}
}
int box_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, 0);
float iou = delta_region_box(truth, l.output, l.biases, best_n, box_index, i, j, l.w, l.h, l.delta, l.coord_scale * (2 - truth.w*truth.h), l.w*l.h);
if(l.coords > 4){
int mask_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, 4);
delta_region_mask(net.truth + t*(l.coords + 1) + b*l.truths + 5, l.output, l.coords - 4, mask_index, l.delta, l.w*l.h, l.mask_scale);
}
if(iou > .5) recall += 1;
avg_iou += iou;
int obj_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, l.coords);
avg_obj += l.output[obj_index];
l.delta[obj_index] = l.object_scale * (1 - l.output[obj_index]);
if (l.rescore) {
/* Rescore mode: objectness target is the achieved IoU, not 1. */
l.delta[obj_index] = l.object_scale * (iou - l.output[obj_index]);
}
if(l.background){
l.delta[obj_index] = l.object_scale * (0 - l.output[obj_index]);
}
int class = net.truth[t*(l.coords + 1) + b*l.truths + l.coords];
if (l.map) class = l.map[class];
int class_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, l.coords + 1);
delta_region_class(l.output, l.delta, class_index, class, l.classes, l.softmax_tree, l.class_scale, l.w*l.h, &avg_cat, !l.softmax);
++count;
++class_count;
}
}
/* Loss is the squared L2 norm of all gradients. */
*(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
#endif
}
/* Backward pass: the forward pass already stored the full gradient in
 * l.delta, so backprop is a plain accumulation into the network's delta.
 * The commented block is an older variant that re-applied the logistic
 * gradient; kept for reference. */
void backward_region_layer(const layer l, network net)
{
/*
int b;
int size = l.coords + l.classes + 1;
for (b = 0; b < l.batch*l.n; ++b){
int index = (b*size + 4)*l.w*l.h;
gradient_array(l.output + index, l.w*l.h, LOGISTIC, l.delta + index);
}
axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, net.delta, 1);
*/
axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, net.delta, 1);
}
/* Extracts decoded boxes and per-class probabilities from the layer output
 * into the caller's `boxes`/`probs` arrays, scaled to a w x h image.  Cells
 * are independent, so the outer loop is parallelised with OpenMP.
 * NOTE(review): the indexing uses a hard-coded (l.classes + 5) record size,
 * i.e. it assumes coords == 4 — confirm for other coord counts. */
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
int i;
float *const predictions = l.output;
#pragma omp parallel for
for (i = 0; i < l.w*l.h; ++i){
int j, n;
int row = i / l.w;
int col = i % l.w;
for(n = 0; n < l.n; ++n){
int index = i*l.n + n;
int p_index = index * (l.classes + 5) + 4;
float scale = predictions[p_index];
/* classfix == -1: suppress low-confidence detections entirely. */
if(l.classfix == -1 && scale < .5) scale = 0;
int box_index = index * (l.classes + 5);
boxes[index] = get_region_box_y4(predictions, l.biases, n, box_index, col, row, l.w, l.h);
boxes[index].x *= w;
boxes[index].y *= h;
boxes[index].w *= w;
boxes[index].h *= h;
int class_index = index * (l.classes + 5) + 5;
if(l.softmax_tree){
hierarchy_predictions_y4(predictions + class_index, l.classes, l.softmax_tree, 0);
int found = 0;
if(map){
for(j = 0; j < 200; ++j){
float prob = scale*predictions[class_index+map[j]];
probs[index][j] = (prob > thresh) ? prob : 0;
}
} else {
/* Without a map, keep only the deepest class scoring > .5 and
 * zero everything below it in the hierarchy order. */
for(j = l.classes - 1; j >= 0; --j){
if(!found && predictions[class_index + j] > .5){
found = 1;
} else {
predictions[class_index + j] = 0;
}
float prob = predictions[class_index+j];
probs[index][j] = (scale > thresh) ? prob : 0;
}
}
} else {
for(j = 0; j < l.classes; ++j){
float prob = scale*predictions[class_index+j];
probs[index][j] = (prob > thresh) ? prob : 0;
}
}
if(only_objectness){
probs[index][0] = scale;
}
}
}
}
/* Maps detections from the network's letterboxed coordinate frame back onto
 * the original w x h image.  When `relative` is zero the boxes are also
 * scaled to absolute pixel units. */
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int new_w = 0;
    int new_h = 0;
    /* Recover the aspect-preserving size the image occupied inside the
     * netw x neth letterbox. */
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    int k;
    for (k = 0; k < n; ++k){
        box bb = dets[k].bbox;
        /* Remove the centring offset, then rescale from letterbox to image. */
        bb.x = (bb.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        bb.y = (bb.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        bb.w *= (float)netw/new_w;
        bb.h *= (float)neth/new_h;
        if(!relative){
            bb.x *= w;
            bb.w *= w;
            bb.y *= h;
            bb.h *= h;
        }
        dets[k].bbox = bb;
    }
}
/* Fills the `dets` array with decoded detections for a w x h source image.
 * A batch of 2 is treated as (image, horizontally-flipped image): the two
 * outputs are averaged after un-flipping the second copy. */
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
int i,j,n,z;
float *predictions = l.output;
if (l.batch == 2) {
float *flip = l.output + l.outputs;
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w/2; ++i) {
for (n = 0; n < l.n; ++n) {
for(z = 0; z < l.classes + l.coords + 1; ++z){
/* Mirror each channel plane left-right... */
int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
float swap = flip[i1];
flip[i1] = flip[i2];
flip[i2] = swap;
/* ...and negate the x-offset channel so boxes line up. */
if(z == 0){
flip[i1] = -flip[i1];
flip[i2] = -flip[i2];
}
}
}
}
}
for(i = 0; i < l.outputs; ++i){
l.output[i] = (l.output[i] + flip[i])/2.;
}
}
for (i = 0; i < l.w*l.h; ++i){
int row = i / l.w;
int col = i % l.w;
for(n = 0; n < l.n; ++n){
int index = n*l.w*l.h + i;
for(j = 0; j < l.classes; ++j){
dets[index].prob[j] = 0;
}
int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
float scale = l.background ? 1 : predictions[obj_index];
dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);
dets[index].objectness = scale > thresh ? scale : 0;
if(dets[index].mask){
for(j = 0; j < l.coords - 4; ++j){
dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
}
}
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
if(l.softmax_tree){
hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0, l.w*l.h);
if(map){
/* With a class map, report the 200 mapped classes directly.
 * (The inner class_index intentionally shadows the outer one.) */
for(j = 0; j < 200; ++j){
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]);
float prob = scale*predictions[class_index];
dets[index].prob[j] = (prob > thresh) ? prob : 0;
}
} else {
/* Otherwise keep only the hierarchy's single top prediction. */
int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
dets[index].prob[j] = (scale > thresh) ? scale : 0;
}
} else {
if(dets[index].objectness){
for(j = 0; j < l.classes; ++j){
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
float prob = scale*predictions[class_index];
dets[index].prob[j] = (prob > thresh) ? prob : 0;
}
}
}
}
}
correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
#ifdef GPU
/* GPU forward pass for the region layer.
 * Copies the raw network input into l.output_gpu, applies the logistic
 * activation to the x/y box offsets (2*w*h values per anchor), to any extra
 * coordinate channels beyond the first 4, to the objectness channel (unless
 * the layer runs in "background" mode) and, when no softmax is configured,
 * to the class scores; then runs either a tree-structured or a flat softmax
 * over the classes. During training, loss and deltas are computed on the
 * CPU by forward_region_layer() and the deltas pushed back to the device. */
void forward_region_layer_gpu(const layer l, network net)
{
    copy_gpu(l.batch*l.inputs, net.input_gpu, 1, l.output_gpu, 1);
    int b, n;
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            /* x and y occupy the first two channels of each anchor */
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array_offset_gpu(l.output_gpu, index, 2*l.w*l.h, LOGISTIC);
            if(l.coords > 4){
                /* extra per-box coordinates (e.g. mask coefficients) */
                index = entry_index(l, b, n*l.w*l.h, 4);
                activate_array_offset_gpu(l.output_gpu, index, (l.coords - 4)*l.w*l.h, LOGISTIC);
            }
            index = entry_index(l, b, n*l.w*l.h, l.coords);
            if(!l.background) activate_array_offset_gpu(l.output_gpu, index, l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
            if(!l.softmax && !l.softmax_tree) activate_array_offset_gpu(l.output_gpu, index, l.classes*l.w*l.h, LOGISTIC);
        }
    }
    if (l.softmax_tree){
        int index = entry_index(l, 0, 0, l.coords + 1);
        softmax_offset_tree(net.input_gpu, index, l.w*l.h, l.batch*l.n, l.inputs/l.n, 1, l.output_gpu, *l.softmax_tree);
    } else if (l.softmax) {
        int index = entry_index(l, 0, 0, l.coords + !l.background);
        softmax_offset_gpu(net.input_gpu, index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu);
    }
    if(!net.train || l.onlyforward){
        /* inference: just make the activated output visible on the host */
        opencl_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
        return;
    }
    /* training path: run the CPU forward pass to compute loss and deltas */
    opencl_pull_array_map(l.output_gpu, net.input, l.batch*l.inputs);
    forward_region_layer(l, net);
    //opencl_push_array(l.output_gpu, l.output, l.batch*l.outputs);
    if(!net.train) return;
    opencl_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
}
/* GPU backward pass for the region layer: applies the logistic gradients
 * that mirror the activations done in forward_region_layer_gpu (x/y
 * offsets, extra coordinate channels, objectness), then accumulates
 * l.delta_gpu into the network's delta. */
void backward_region_layer_gpu(const layer l, network net)
{
    int b, n;
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            int index = entry_index(l, b, n*l.w*l.h, 0);
            gradient_array_offset_gpu(l.output_gpu, index, 2*l.w*l.h, LOGISTIC, l.delta_gpu);
            if(l.coords > 4){
                index = entry_index(l, b, n*l.w*l.h, 4);
                gradient_array_offset_gpu(l.output_gpu, index, (l.coords - 4)*l.w*l.h, LOGISTIC, l.delta_gpu);
            }
            index = entry_index(l, b, n*l.w*l.h, l.coords);
            if(!l.background) gradient_array_offset_gpu(l.output_gpu, index, l.w*l.h, LOGISTIC, l.delta_gpu);
        }
    }
    /* net.delta += 1 * l.delta */
    axpy_gpu(l.batch*l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1);
}
#endif
void zero_objectness(layer l)
{
    /* Clear the objectness score of every anchor at every spatial cell
       (batch 0 only), leaving box coordinates and class scores untouched. */
    int anchor, cell;
    for (anchor = 0; anchor < l.n; ++anchor) {
        for (cell = 0; cell < l.w*l.h; ++cell) {
            l.output[entry_index(l, 0, anchor*l.w*l.h + cell, l.coords)] = 0;
        }
    }
}
|
forest.c | #include "forest.h"
#ifdef USING_MPI
const MPI_Comm comm = MPI_COMM_WORLD;
int myrank, size;
#endif
/* Forest-fire cellular automaton driver.
 * Runs the simulation for simLength steps over a double-buffered grid.
 * With USING_MPI the grid is split into four fixed quadrants (one per
 * rank) and full quadrants are exchanged every step; with USING_OMP rows
 * are processed in parallel with per-thread PRNG state. */
int main(int argc, char** argv) {
    int i, y, seed;
    args* myArgs;
    forest* f;
    cell** swapGrid;
    struct random_data* rand_state;   /* one reentrant PRNG state per thread */
    char rand_buf[256];               /* state buffer handed to initstate_r */
    memset(rand_buf, 'q', (size_t)256);
    int startX = 0, startY = 0;       /* this rank's slice of the grid */
    int endX = 0, endY = 0;
    int* rands;                       /* scratch for 4 random draws per x-step */
#ifdef USING_OMP
    int threads = omp_get_max_threads();
#else
    int threads = 1;
#endif
#ifdef USING_MPI
    //MPI_Request request[3];
    //MPI_Status* status = (MPI_Status*) malloc(sizeof(MPI_Status) * 3);
    MPI_Status status[3];
    memset(&status, 0, sizeof(MPI_Status) * 3);
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(comm, &myrank);
    MPI_Comm_size(comm, &size);
    /* the quadrant decomposition below is hardwired to exactly 4 ranks */
    if (size != 4) {
        printf("Unfortunately this program has been hardcoded to run on 4 nodes.\nExiting...\n");
        MPI_Finalize();
        exit(1);
    }
#endif
    myArgs = parse_args(argc, argv);
    f = alloc_forest(myArgs);
    // set up which parts of the grid we're going to iterate over
#ifdef USING_MPI
    /* Quadrant origins/limits for each rank:
         rank 0: top-left      rank 1: top-right
         rank 2: bottom-left   rank 3: bottom-right
       The r*start/r*end values are kept for copying PEER quadrants back
       into the local full grid after the exchange. */
    int r0startX = 0;
    int r0startY = 0;
    //int r0endX = f->dimX/2;
    int r0endY = f->dimY/2;
    //int r1startX = (f->dimX/2) + 1;
    int r1startX = (f->dimX/2);
    int r1startY = 0;
    //int r1endX = f->dimX;
    int r1endY = f->dimY/2;
    int r2startX = 0;
    //int r2startY = (f->dimY/2) + 1;
    int r2startY = (f->dimY/2);
    //int r2endX = f->dimX/2;
    int r2endY = f->dimY;
    //int r3startX = (f->dimX/2) + 1;
    int r3startX = (f->dimX/2);
    //int r3startY = (f->dimY/2) + 1;
    int r3startY = (f->dimY/2);
    //int r3endX = f->dimX;
    int r3endY = f->dimY;
    switch(myrank) {
        case 0:
            startX = 0;
            startY = 0;
            endX = f->dimX/2;
            endY = f->dimY/2;
            break;
        case 1:
            //startX = (f->dimX/2) + 1;
            startX = (f->dimX/2);
            startY = 0;
            endX = f->dimX;
            endY = f->dimY/2;
            break;
        case 2:
            startX = 0;
            //startY = (f->dimY/2) + 1;
            startY = (f->dimY/2);
            endX = f->dimX/2;
            endY = f->dimY;
            break;
        case 3:
            //startX = (f->dimX/2) + 1;
            startX = (f->dimX/2);
            //startY = (f->dimY/2) + 1;
            startY = (f->dimY/2);
            endX = f->dimX;
            endY = f->dimY;
            break;
    }
#else
    startX = 0;
    startY = 0;
    endX = f->dimX;
    endY = f->dimY;
#endif
    // ncurses requires some setup
    if (myArgs->output == NCURSES) {
        init_ncurses(endX, endY);
    }
    if (myArgs->logging) {
        myArgs->log = fopen("forest.log","w");
    }
    rand_state = (struct random_data*) malloc(sizeof(struct random_data) * threads);
    /* NOTE(review): this memset runs BEFORE the NULL check below, so a
       failed malloc crashes here instead of reaching the error message. */
    memset(rand_state, 0, sizeof(struct random_data) * threads);
    if (rand_state == NULL) {
        fprintf(stderr,"malloc failed!\n");
        exit(17);
    }
    for (int q = 0; q < threads; q++) {
        if (myArgs->output == VERIFY) {
            seed = 5; //deterministic
        } else {
#ifdef USING_MPI
            /* NOTE(review): time(NULL) ^ (q + myrank) can collide across
               (thread, rank) pairs with the same q + myrank — confirm
               whether correlated streams matter here. */
            seed = time(NULL) ^ (q + myrank);
#else
            seed = time(NULL) ^ q; //non-deterministic
#endif
        }
        initstate_r(seed, rand_buf, 256, &rand_state[q]);
        srandom_r(seed, &rand_state[q]);
    }
#ifdef USING_MPI
    //fprintf(stderr,"rank %d is working on x %d to %d and y %d to %d\n",myrank, startX, endX, startY, endY);
#endif
    rands = (int*) malloc(sizeof(int)*4);
    memset(rands, 0, sizeof(int)*4);
    // main loop
    for (i = 0; i < f->simLength; i++) {
        //fprintf(stdout,"rank %d got to timestep %d\n",myrank,f->time);
        //fflush();
        /* NOTE(review): private(rands) gives every worker thread its own
           UNINITIALIZED copy of the rands pointer, which random_r then
           writes through — undefined behavior in OMP builds. This likely
           needs firstprivate(rands) (shared buffer, still racy) or a
           per-thread array declared inside the loop. Confirm. */
        #pragma omp parallel for private(rands)
        for (y = startY; y < endY; y++) {
            /* unrolled by 4; parse_args enforces mod-8 dimensions so each
               (half-)row width is a multiple of 4 */
            for (int x = startX; x < endX; x+=4) {
#ifdef USING_OMP
                random_r(&rand_state[omp_get_thread_num()], &rands[0]);
                random_r(&rand_state[omp_get_thread_num()], &rands[1]);
                random_r(&rand_state[omp_get_thread_num()], &rands[2]);
                random_r(&rand_state[omp_get_thread_num()], &rands[3]);
#else
                random_r(&rand_state[0], &rands[0]);
                random_r(&rand_state[0], &rands[1]);
                random_r(&rand_state[0], &rands[2]);
                random_r(&rand_state[0], &rands[3]);
#endif
                cell_auto(f, x, y, f->oldGrid[y][x].status, &rands[0]);
                cell_auto(f, x+1, y, f->oldGrid[y][x+1].status, &rands[1]);
                cell_auto(f, x+2, y, f->oldGrid[y][x+2].status, &rands[2]);
                cell_auto(f, x+3, y, f->oldGrid[y][x+3].status, &rands[3]);
            }
        }
        if (myArgs->logging) {
            fprintf(myArgs->log, "%d %d %d\n", i, f->treeCount, f->burnCount);
        }
#ifdef USING_MPI
        int z = 0;
        int sendOne = 0, sendTwo = 0, sendThree = 0;
        //int err = 0;
        //size_t xferSize = sizeof(cell) * (f->dimX/2) * (f->dimY/2);
        int xferSize = (int)sizeof(cell) * (f->dimX/2) * (f->dimY/2);
        size_t rowSize = sizeof(cell) * (f->dimX/2);
        //fprintf(stderr,"dimx = %d, dimy = %d, xfersize = %d, rowsize = %d\n",f->dimX, f->dimY, (int)xferSize, (int)rowSize);
        //fprintf(stderr, "size of a cell is %d bytes\n",(int)sizeof(cell));
        //fprintf(stderr, "there should be %d cells in the sendGrid.\n",((f->dimX/2) * (f->dimY/2)));
        //fprintf(stderr, "but we are copying in %d times %d cells.\n", (endY - startY), f->dimX/2);
        // copy worked-on part of grid to sendGrid for sending
        for (int e = startY; e < endY; e++) {
            memcpy(f->sendGrid[z], &f->newGrid[e][startX], rowSize);
            z++;
        }
        /* Each rank exchanges its full quadrant with the other three ranks
           (ordering chosen per rank to avoid Sendrecv deadlock), then
           scatters the received quadrants back into its full newGrid. */
        switch(myrank) {
            case 0:
                sendOne = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 1, 0, *f->recvGrid1, xferSize, MPI_BYTE, 1, 0, comm, &status[0]);
                sendTwo = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 2, 0, *f->recvGrid2, xferSize, MPI_BYTE, 2, 0, comm, &status[1]);
                sendThree = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 3, 0, *f->recvGrid3, xferSize, MPI_BYTE, 3, 0, comm, &status[2]);
                //checksum_grid(f->recvGrid1, f->dimX/2, f->dimY/2);
                // copy from recvgrids
                z = 0;
                for (int e = r1startY; e < r1endY; e++) {
                    memcpy(&f->newGrid[e][r1startX], f->recvGrid1[z], rowSize);
                    z++;
                }
                z = 0;
                for (int e = r2startY; e < r2endY; e++) {
                    memcpy(&f->newGrid[e][r2startX], f->recvGrid2[z], rowSize);
                    z++;
                }
                z = 0;
                for (int e = r3startY; e < r3endY; e++) {
                    memcpy(&f->newGrid[e][r3startX], f->recvGrid3[z], rowSize);
                    z++;
                }
                break;
            case 1:
                sendOne = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 0, 0, *f->recvGrid1, xferSize, MPI_BYTE, 0, 0, comm, &status[0]);
                sendTwo = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 3, 0, *f->recvGrid2, xferSize, MPI_BYTE, 3, 0, comm, &status[1]);
                sendThree = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 2, 0, *f->recvGrid3, xferSize, MPI_BYTE, 2, 0, comm, &status[2]);
                //checksum_grid(f->sendGrid, f->dimX/2, f->dimY/2);
                // copy from recvgrids
                z = 0;
                for (int e = r0startY; e < r0endY; e++) {
                    memcpy(&f->newGrid[e][r0startX], f->recvGrid1[z], rowSize);
                    z++;
                }
                z = 0;
                for (int e = r3startY; e < r3endY; e++) {
                    memcpy(&f->newGrid[e][r3startX], f->recvGrid2[z], rowSize);
                    z++;
                }
                z = 0;
                for (int e = r2startY; e < r2endY; e++) {
                    memcpy(&f->newGrid[e][r2startX], f->recvGrid3[z], rowSize);
                    z++;
                }
                break;
            case 2:
                sendOne = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 3, 0, *f->recvGrid1, xferSize, MPI_BYTE, 3, 0, comm, &status[0]);
                sendTwo = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 0, 0, *f->recvGrid2, xferSize, MPI_BYTE, 0, 0, comm, &status[1]);
                sendThree = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 1, 0, *f->recvGrid3, xferSize, MPI_BYTE, 1, 0, comm, &status[2]);
                // copy from recvgrids
                z = 0;
                for (int e = r3startY; e < r3endY; e++) {
                    memcpy(&f->newGrid[e][r3startX], f->recvGrid1[z], rowSize);
                    z++;
                }
                z = 0;
                for (int e = r0startY; e < r0endY; e++) {
                    memcpy(&f->newGrid[e][r0startX], f->recvGrid2[z], rowSize);
                    z++;
                }
                z = 0;
                for (int e = r1startY; e < r1endY; e++) {
                    memcpy(&f->newGrid[e][r1startX], f->recvGrid3[z], rowSize);
                    z++;
                }
                break;
            case 3:
                sendOne = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 2, 0, *f->recvGrid1, xferSize, MPI_BYTE, 2, 0, comm, &status[0]);
                sendTwo = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 1, 0, *f->recvGrid2, xferSize, MPI_BYTE, 1, 0, comm, &status[1]);
                sendThree = MPI_Sendrecv(*f->sendGrid, xferSize, MPI_BYTE, 0, 0, *f->recvGrid3, xferSize, MPI_BYTE, 0, 0, comm, &status[2]);
                // copy from recvgrids
                z = 0;
                for (int e = r2startY; e < r2endY; e++) {
                    memcpy(&f->newGrid[e][r2startX], f->recvGrid1[z], rowSize);
                    z++;
                }
                z = 0;
                for (int e = r1startY; e < r1endY; e++) {
                    memcpy(&f->newGrid[e][r1startX], f->recvGrid2[z], rowSize);
                    z++;
                }
                z = 0;
                for (int e = r0startY; e < r0endY; e++) {
                    memcpy(&f->newGrid[e][r0startX], f->recvGrid3[z], rowSize);
                    z++;
                }
                break;
            default:
                fprintf(stderr,"your switch case is broke\n");
                break;
        }
        /* MPI_Sendrecv returns nonzero on error */
        if (sendOne || sendTwo || sendThree) {
            fprintf(stderr,"i'm %d, %d %d %d\n",myrank, sendOne, sendTwo, sendThree);
        }
        // sanity check
        /*
        for (int zz = 0; zz < f->dimY; zz++) {
        f->newGrid[25][zz].status = BURN;
        f->newGrid[26][zz].status = BURN;
        f->newGrid[27][zz].status = BURN;
        f->newGrid[28][zz].status = BURN;
        }
        */
        // only one rank needs to output
        if (myrank == 0) {
            myArgs->out(f);
        }
#else
        myArgs->out(f); // it's a function pointer!
#endif
        f->time++;
        /* swap the double buffers: newGrid becomes the next step's oldGrid */
        swapGrid = f->oldGrid;
        f->oldGrid = f->newGrid;
        f->newGrid = swapGrid;
    }
    if (myArgs->output == NCURSES) {
        endwin();
    }
    if (myArgs->logging) {
        fclose(myArgs->log);
    }
#ifdef USING_MPI
    /* NOTE(review): after the final swap, f->newGrid holds the grid from
       one step earlier; the freshest state is in f->oldGrid — confirm
       which one is intended here. */
    text_output(f->newGrid, f->dimX, f->dimY);
    //checksum_grid(f->newGrid, f->dimX, f->dimY);
    MPI_Finalize();
#endif
    return 0;
}
/* Dump a grid to stderr as an ASCII picture: ' ' empty, 'T' tree,
 * 'B' burning. x/y are the grid dimensions; one text row per grid row.
 * Buffer holds x*y cell chars + y newlines + 1 NUL terminator. */
void text_output(cell** grid, int x, int y) {
    char* out = (char*) malloc(sizeof(char) * x * y + (y + 1));
    int c = 0;
    int arr;
    /* BUG FIX: every other allocation in this file is checked; this one
       was dereferenced unconditionally. */
    if (out == NULL) {
        fprintf(stderr,"malloc failed!\n");
        exit(17);
    }
    for (int s = 0; s < y; s++) {
        for (int t = 0; t < x; t++) {
            switch(grid[s][t].status) {
                case EMPTY:
                    out[c++] = ' ';
                    break;
                case TREE:
                    out[c++] = 'T';
                    break;
                case BURN:
                    out[c++] = 'B';
                    break;
                default:
                    fprintf(stderr,"well that's not good - %d %d\n",s,t);
                    break;
            }
        }
        out[c++] = '\n';
    }
    out[c++] = '\0';
#ifdef USING_MPI
    arr = myrank;
#else
    arr = 0;
#endif
    fprintf(stderr,"hi i'm %d and this is what i have for you\n%s\n",arr,out);
    free(out);
}
/* No-op output sink, selected with the 'null' output mode. */
void out_null(forest* f) {
    (void)f; /* intentionally unused */
}
/* Verify-mode output: on the final timestep, flatten the grid into a
 * string, cross-check the running tree/burn counters against a recount,
 * and print a crypt(3) checksum of the grid for run-to-run comparison. */
void out_verify(forest* f) {
    int x, y;
    int treeCheck = 0;
    int burnCheck = 0;
    char* strGrid;
    char salt[] = "$1$cosc3500";
    char* res;
    if (f->time == f->simLength) {
        fprintf(stdout,"Calculating grid integrity...\n");
        strGrid = (char*)malloc((sizeof(char) * f->dimX * f->dimY) + 1);
        if (strGrid == NULL) {
            fprintf(stderr,"malloc failed!\n");
            exit(17);
        }
        strGrid[f->dimX * f->dimY] = '\0';
        for (y = 0; y < f->dimY; y++) {
            for (x = 0; x < f->dimX; x++) {
                /* BUG FIX: the row stride is the row LENGTH dimX, not dimY.
                   The old (y * dimY) + x indexing scrambled the string and
                   overran the buffer whenever dimX != dimY. */
                switch(f->newGrid[y][x].status) {
                    case EMPTY:
                        strGrid[(y * f->dimX) + x] = ' ';
                        break;
                    case TREE:
                        strGrid[(y * f->dimX) + x] = 'T';
                        treeCheck++;
                        break;
                    case BURN:
                        strGrid[(y * f->dimX) + x] = 'B';
                        burnCheck++;
                        break;
                    default:
                        fprintf(stderr, "bad tree status at %d %d\n",x,y);
                        break;
                }
            }
        }
        if (treeCheck == f->treeCount && burnCheck == f->burnCount) {
            fprintf(stdout, "treeCount and burnCount okay\n");
        } else {
            fprintf(stdout, "error: treeCount = %d, treeCheck = %d\n", f->treeCount, treeCheck);
            fprintf(stdout, "error: burnCount = %d, burnCheck = %d\n", f->burnCount, burnCheck);
        }
        res = crypt(strGrid, salt);
        fprintf(stdout,"Grid checksum is: %s\n",res);
        /* BUG FIX: strGrid was leaked (checksum_grid frees its buffer;
           this function did not). crypt() returns static storage, so the
           buffer is no longer needed after the call. */
        free(strGrid);
    }
    return;
}
/* Debug helper: flatten a grid into a string, tally cell statuses, and
 * print a crypt(3) digest so grids can be compared across ranks/runs. */
void checksum_grid(cell** grid, int x, int y) {
    int i, j;
    int e = 0, t = 0, b = 0;
    char salt[] = "$1$cosc3500";
    char* res;
    char* strGrid = (char*)malloc((sizeof(char) * x * y) + 1);
    if (strGrid == NULL) {
        fprintf(stderr,"malloc failed!\n");
        exit(17);
    }
    strGrid[x * y] = '\0';
    int c = 0;
    for (i = 0; i < y; i++) {
        for (j = 0; j < x; j++) {
            switch(grid[i][j].status) {
                case EMPTY:
                    strGrid[c++] = ' ';
                    e++;
                    break;
                case TREE:
                    strGrid[c++] = 'T';
                    t++;
                    break;
                case BURN:
                    strGrid[c++] = 'B';
                    b++;
                    break;
                default:
                    strGrid[c++] = '.';
                    /* BUG FIX: report the offending cell's coordinates
                       (j, i), not the grid dimensions (x, y) which the old
                       message printed for every bad cell. */
                    fprintf(stderr, "bad tree status at %d %d, status is %d\n",j,i,grid[i][j].status);
                    break;
            }
        }
    }
    strGrid[c++] = '\0';   /* writes index x*y, the last byte of the buffer */
    res = crypt(strGrid, salt);
#ifdef USING_MPI
    fprintf(stderr,"rank %d, e=%d, t=%d, b=%d, %s\n", myrank, e, t, b, res);
    //fprintf(stderr,"strgrid is %d chars long and c is %d \n",(int)strlen(strGrid),c);
#endif
    free(strGrid);
}
/* perform cellular automata rules on each cell in the forest */
/* Advance one cell (x, y) from f->oldGrid into f->newGrid according to its
 * current status `mode`, using *rand as the random draw for every
 * probabilistic rule in this call. The shared counters f->treeCount and
 * f->burnCount are updated with OpenMP atomics because many threads call
 * this concurrently. */
void cell_auto(forest* f, int x, int y, int mode, int* rand) {
    int dx,dy;
    int rx,ry;
    cell* c = &f->oldGrid[y][x];
    cell* n = &f->newGrid[y][x];
    switch(mode) {
        case EMPTY:
            /* empty ground sprouts a tree with probability GROWCHANCE/10000 */
            if ( (*rand % 10000) < GROWCHANCE) {
                n->status = TREE;
                n->age = 0;
                #pragma omp atomic
                f->treeCount++;
            } else {
                n->status = EMPTY;
            }
            return;
        case TREE:
            // chance of catching on fire from neighbouring trees
            for (dx = -1; dx <= 1; dx++) {
                for (dy = -1; dy <= 1; dy++) {
                    if (dx == 0 && dy == 0) { // don't examine yourself
                        continue;
                    }
                    rx = x + dx;
                    ry = y + dy;
                    if (rx >= 0 && rx < f->dimX &&
                        ry >= 0 && ry < f->dimY) { // bounds checking
                        /* older trees are more flammable: ignition odds
                           scale with age (capped at 240) out of 720 */
                        if (f->oldGrid[ry][rx].status == BURN &&
                            (*rand % 720) < c->age) {
                            n->status = BURN;
                            n->age = 0;
                            n->burnTime = BURNLENGTH;
                            #pragma omp atomic
                            f->burnCount++;
                            /* BUG FIX: `omp atomic` only protects the single
                               statement that follows it, so this decrement
                               was an unsynchronized data race. */
                            #pragma omp atomic
                            f->treeCount--;
                            return; // nothing else to do
                        }
                    }
                }
            }
            // chance of bursting into flames spontaneously
            if ( (*rand % 150000) < BURNCHANCE) {
                n->status = BURN;
                n->burnTime = BURNLENGTH;
                #pragma omp atomic
                f->burnCount++;
                /* BUG FIX: same race as above — needs its own atomic */
                #pragma omp atomic
                f->treeCount--;
                return;
            }
            // if we get here, it's a tree and is still a tree...?
            n->status = TREE;
            if (c->age < 240) {
                n->age = c->age + 4;
            }
            return;
        case BURN:
            /* burn for BURNLENGTH steps, then become empty ground */
            if (c->burnTime == 0) {
                n->status = EMPTY;
                n->age = 0;
                #pragma omp atomic
                f->burnCount--;
            } else {
                n->status = BURN;
                n->burnTime = c->burnTime - 1;
            }
            return;
        default:
            fprintf(stderr,"unitialised cell at %d,%d, cell has status %d\n", x, y, mode);
            return;
    }
}
/* allocate and populate the forest struct, including initial trees */
/* Allocate and populate the forest struct: two full grids (double buffer),
 * the MPI staging grids when enabled, and the PNG pixel buffer when PNG
 * output is selected. Exits on any allocation failure. */
forest* alloc_forest(args* myArgs) {
    bitmap_t* png;
    int x = myArgs->dimX;
    int y = myArgs->dimY;
    forest* f = (forest*)malloc(sizeof(forest));
    if (f == NULL) {
        fprintf(stderr,"malloc failed!\n");
        exit(17);
    }
    f->newGrid = alloc_grid(x, y);
    f->oldGrid = alloc_grid(x, y);
#ifdef USING_MPI
    /* quadrant-sized, contiguous staging buffers for Sendrecv exchanges */
    f->sendGrid = alloc_2d_grid(x/2, y/2);
    f->recvGrid1 = alloc_2d_grid(x/2, y/2);
    f->recvGrid2 = alloc_2d_grid(x/2, y/2);
    f->recvGrid3 = alloc_2d_grid(x/2, y/2);
#endif
    if (myArgs->output == PNG) {
        // alloc pixel array
        png = (bitmap_t*)malloc(sizeof(bitmap_t));
        if (png == NULL) {
            fprintf(stderr,"malloc failed!\n");
            exit(17);
        }
        /* FIX: calloc takes (count, element-size) in that order, and the
           result must be checked like every other allocation here. */
        png->pixels = calloc(x * y, sizeof(pixel_t));
        if (png->pixels == NULL) {
            fprintf(stderr,"malloc failed!\n");
            exit(17);
        }
        png->width = x;
        png->height = y;
        f->png = png;
    }
    f->dimX = x;
    f->dimY = y;
    f->treeCount = 0;
    f->burnCount = 0;
    f->time = 1;   /* 1-based; out_verify fires when time == simLength */
    f->simLength = myArgs->simLength;
    return f;
}
/* Allocate a y-row by x-column grid as one allocation per row.
 * Every cell is zeroed and given EMPTY status; exits on failure. */
cell** alloc_grid(int x, int y) {
    cell** rows = (cell**)malloc(sizeof(cell*) * y);
    if (rows == NULL) {
        fprintf(stderr,"malloc failed!\n");
        exit(17);
    }
    // i'm so sorry valgrind
    memset(rows, 0, sizeof(cell*) * y);
    for (int r = 0; r < y; r++) {
        rows[r] = (cell*)malloc(sizeof(cell) * x);
        if (rows[r] == NULL) {
            fprintf(stderr,"malloc failed!\n");
            exit(17);
        }
        memset(rows[r], 0, sizeof(cell) * x);
        for (int col = 0; col < x; col++) {
            rows[r][col].status = EMPTY;
        }
    }
    return rows;
}
// returns a 2d array that is allocated as one contiguous
// block of memory. cool trick!
cell** alloc_2d_grid(int x, int y) {
cell* data = (cell*)malloc(sizeof(cell) * x * y);
if (data == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
// according to valgrind i am a horrible person
memset(data, 0, sizeof(cell) * x * y);
cell** array = (cell**)malloc(sizeof(cell*) * y);
if (array == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
for (int i = 0; i < y; i++) {
array[i] = &(data[x * i]);
}
return array;
}
/* parse argv for valid parameters and return as a struct */
args* parse_args(int argc, char** argv) {
int len;
args* myArgs = (args*)malloc(sizeof(args));
if (myArgs == NULL) {
fprintf(stderr,"malloc failed!\n");
exit(17);
}
char usage[] = "Usage: forest dimensionX dimensionY output simlength [log]\n";
myArgs->logging = 0;
switch(argc) {
case 6:
if (!strcmp(argv[5],"log")) {
myArgs->logging = 1;
} else {
printf("%s", usage);
}
case 5:
myArgs->dimX = atoi(argv[1]);
myArgs->dimY = atoi(argv[2]);
if (myArgs->dimX % 8 != 0 || myArgs->dimY % 8 != 0) {
printf("Error: please use mod8 grid dimensions\n");
printf("%s", usage);
exit(1);
}
errno = 0;
len = strtol(argv[4], NULL, 10);
if ((errno == EINVAL) || (errno == ERANGE) || (len < 1) || (len > INT_MAX)) {
printf("Error: invalid simlength, please enter an integer between 1 and %d\n", INT_MAX);
printf("%s", usage);
exit(2);
}
myArgs->simLength = len;
if (!strcmp(argv[3],"ncurses")) {
myArgs->output = NCURSES;
myArgs->out = out_ncurses;
} else if (!strcmp(argv[3],"png")) {
myArgs->output = PNG;
myArgs->out = out_png;
} else if (!strcmp(argv[3],"null")) {
myArgs->output = NULLOUT;
myArgs->out = out_null;
} else if (!strcmp(argv[3],"verify")) {
myArgs->output = VERIFY;
myArgs->out = out_verify;
} else {
printf("Error: output should be one of 'png' 'ncurses' 'null' 'verify'\n");
printf("%s", usage);
exit(1);
}
return myArgs;
default:
printf("%s", usage);
exit(1);
}
return myArgs;
}
|
bem_pbc.c | #include "common.h"
/* Accumulate boundary-element contributions into the n_node x n_node
 * matrix bm. For every observation node p and every triangular face c,
 * boundary_element() produces the three per-vertex contributions, which
 * are scattered into row p of bm. Rows are disjoint per p, so the outer
 * loop parallelizes without write conflicts. */
void build_matrix_T(double *x_t, int *tri_nodes, double *bm, double *T, int n_node, int n_face) {
    int p, c;
    //#pragma omp parallel for
    #pragma omp parallel for private(p, c)
    for (p = 0; p < n_node; p++) {
        double *obs = &x_t[3*p];           /* observation point */
        for (c = 0; c < n_face; c++) {
            int na = tri_nodes[3*c];
            int nb = tri_nodes[3*c+1];
            int nc = tri_nodes[3*c+2];
            double contrib[3];
            boundary_element(obs, &x_t[3*na], &x_t[3*nb], &x_t[3*nc], contrib, T);
            bm[p*n_node+na] += contrib[0];
            bm[p*n_node+nb] += contrib[1];
            bm[p*n_node+nc] += contrib[2];
        }
    }
}
|
kernel_exp.c | /*! @copyright (c) 2017 King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
*
* STARS-H is a software package, provided by King Abdullah
* University of Science and Technology (KAUST)
*
* @generate NDIM -> n 1 2 3 4
* Generate different functions for different dimensions. This hack improves
* performance in certain cases. Value 'n' stands for general case, whereas all
* other values correspond to static values of dimensionality.
* During code generation step, each appearance of @NDIM (including this one)
* will be replace by proposed values. If you want to use this file outside
* STARS-H, simply do substitutions yourself.
*
* @file src/applications/spatial/kernel_exp.c
* @version 0.1.1
* @author Aleksandr Mikhalev
* @date 2018-11-06
*/
#include "common.h"
#include "starsh.h"
#include "starsh-spatial.h"
// If dimensionality is static
#if (@NDIM != n)
//! Replace variable ndim with static integer value
#define ndim @NDIM
#endif
void starsh_ssdata_block_exp_kernel_@NDIMd(int nrows, int ncols,
        STARSH_int *irow, STARSH_int *icol, void *row_data, void *col_data,
        void *result, int ld)
//! Exponential kernel for @NDIM-dimensional spatial statistics problem
/*! Fills matrix \f$ A \f$ with values
 * \f[
 *      A_{ij} = \sigma^2 e^{-\frac{r_{ij}}{\beta}} + \mu \delta(r_{ij}),
 * \f]
 * where \f$ \delta \f$ is the delta function
 * \f[
 *      \delta(x) = \left\{ \begin{array}{ll} 0, & x \ne 0\\ 1, & x = 0
 *      \end{array} \right.,
 * \f]
 * \f$ r_{ij} \f$ is a distance between \f$i\f$-th and \f$j\f$-th spatial
 * points and variance \f$ \sigma \f$, correlation length \f$ \beta \f$ and
 * noise \f$ \mu \f$ come from \p row_data (\ref STARSH_ssdata object). No
 * memory is allocated in this function!
 *
 * @param[in] nrows: Number of rows of \f$ A \f$.
 * @param[in] ncols: Number of columns of \f$ A \f$.
 * @param[in] irow: Array of row indexes.
 * @param[in] icol: Array of column indexes.
 * @param[in] row_data: Pointer to physical data (\ref STARSH_ssdata object).
 * @param[in] col_data: Pointer to physical data (\ref STARSH_ssdata object).
 * @param[out] result: Pointer to memory of \f$ A \f$.
 * @param[in] ld: Leading dimension of `result`.
 * @sa starsh_ssdata_block_exp_kernel_1d(),
 *      starsh_ssdata_block_exp_kernel_2d(),
 *      starsh_ssdata_block_exp_kernel_3d(),
 *      starsh_ssdata_block_exp_kernel_4d(),
 *      starsh_ssdata_block_exp_kernel_nd().
 * @ingroup app-spatial-kernels
 * */
{
    int i, j, k;
    STARSH_ssdata *data1 = row_data;
    STARSH_ssdata *data2 = col_data;
    double tmp, dist;
    // Read parameters
    // If dimensionality is not static
#if (@NDIM == n)
    int ndim = data1->particles.ndim;
#endif
    // beta is stored negated so that exp(dist) below yields exp(-r/beta)
    double beta = -data1->beta;
    double noise = data1->noise;
    double sigma = data1->sigma;
    // Get coordinates: point data is stored dimension-major, so the k-th
    // coordinate array of particle set s starts at point + k*count_s
    STARSH_int count1 = data1->particles.count;
    STARSH_int count2 = data2->particles.count;
    double *x1[ndim], *x2[ndim];
    x1[0] = data1->particles.point;
    x2[0] = data2->particles.point;
    for(i = 1; i < ndim; i++) {
        x1[i] = x1[0]+i*count1;
        x2[i] = x2[0]+i*count2;
    }
    /* FIX: removed unused locals x1_cur/x2_cur (dead declarations). */
    double *buffer = result;
    // Fill row-major matrix
    //#pragma omp simd
    for(i = 0; i < nrows; i++) {
        for(j = 0; j < ncols; j++) {
            dist = 0.0;
            for(k = 0; k < ndim; k++) {
                tmp = x1[k][irow[i]]-x2[k][icol[j]];
                dist += tmp*tmp;
            }
            dist = sqrt(dist)/beta;
            if(dist == 0)
                buffer[i*(size_t)ld + j] = sigma+noise;   // diagonal: nugget
            else
                buffer[i*(size_t)ld + j] = sigma*exp(dist);
        }
    }
    /* for(j = 0; j < ncols; j++) */
    /* { */
    /*     for(i = 0; i < nrows; i++) */
    /*     { */
    /*         dist = 0.0; */
    /*         for(k = 0; k < ndim; k++) */
    /*         { */
    /*             tmp = x1[k][irow[i]]-x2[k][icol[j]]; */
    /*             dist += tmp*tmp; */
    /*         } */
    /*         dist = sqrt(dist)/beta; */
    /*         if(dist == 0) */
    /*             buffer[j*(size_t)ld+i] = sigma+noise; */
    /*         else */
    /*             buffer[j*(size_t)ld+i] = sigma*exp(dist); */
    /*     } */
    /* } */
}
void starsh_ssdata_block_exp_kernel_@NDIMd_simd(int nrows, int ncols,
        STARSH_int *irow, STARSH_int *icol, void *row_data, void *col_data,
        void *result, int ld)
//! Exponential kernel for @NDIM-dimensional spatial statistics problem
/*! Fills matrix \f$ A \f$ with values
 * \f[
 *      A_{ij} = \sigma^2 e^{-\frac{r_{ij}}{\beta}} + \mu \delta(r_{ij}),
 * \f]
 * where \f$ \delta \f$ is the delta function
 * \f[
 *      \delta(x) = \left\{ \begin{array}{ll} 0, & x \ne 0\\ 1, & x = 0
 *      \end{array} \right.,
 * \f]
 * \f$ r_{ij} \f$ is a distance between \f$i\f$-th and \f$j\f$-th spatial
 * points and variance \f$ \sigma \f$, correlation length \f$ \beta \f$ and
 * noise \f$ \mu \f$ come from \p row_data (\ref STARSH_ssdata object). No
 * memory is allocated in this function!
 *
 * Uses SIMD instructions.
 *
 * @param[in] nrows: Number of rows of \f$ A \f$.
 * @param[in] ncols: Number of columns of \f$ A \f$.
 * @param[in] irow: Array of row indexes.
 * @param[in] icol: Array of column indexes.
 * @param[in] row_data: Pointer to physical data (\ref STARSH_ssdata object).
 * @param[in] col_data: Pointer to physical data (\ref STARSH_ssdata object).
 * @param[out] result: Pointer to memory of \f$ A \f$.
 * @param[in] ld: Leading dimension of `result`.
 * @sa starsh_ssdata_block_exp_kernel_1d_simd(),
 *      starsh_ssdata_block_exp_kernel_2d_simd(),
 *      starsh_ssdata_block_exp_kernel_3d_simd(),
 *      starsh_ssdata_block_exp_kernel_4d_simd(),
 *      starsh_ssdata_block_exp_kernel_nd_simd().
 * @ingroup app-spatial-kernels
 * */
{
    int i, j, k;
    STARSH_ssdata *data1 = row_data;
    STARSH_ssdata *data2 = col_data;
    double tmp, dist;
    // Read parameters
    // If dimensionality is not static
#if (@NDIM == n)
    int ndim = data1->particles.ndim;
#endif
    // beta is stored negated so that exp(dist) below yields exp(-r/beta)
    double beta = -data1->beta;
    double noise = data1->noise;
    double sigma = data1->sigma;
    // Get coordinates (dimension-major storage, as in the plain kernel)
    size_t count1 = data1->particles.count;
    size_t count2 = data2->particles.count;
    double *x1[ndim], *x2[ndim];
    x1[0] = data1->particles.point;
    x2[0] = data2->particles.point;
    #pragma omp simd
    for(i = 1; i < ndim; i++)
    {
        x1[i] = x1[0]+i*count1;
        x2[i] = x2[0]+i*count2;
    }
    /* FIX: removed unused locals x1_cur/x2_cur (dead declarations). */
    double *buffer = result;
    // Fill column-major matrix
    #pragma omp simd
    for(j = 0; j < ncols; j++)
    {
        for(i = 0; i < nrows; i++)
        {
            dist = 0.0;
            for(k = 0; k < ndim; k++)
            {
                tmp = x1[k][irow[i]]-x2[k][icol[j]];
                dist += tmp*tmp;
            }
            dist = sqrt(dist)/beta;
            if(dist == 0)
                buffer[j*(size_t)ld+i] = sigma+noise;   // diagonal: nugget
            else
                buffer[j*(size_t)ld+i] = sigma*exp(dist);
        }
    }
}
|
template-for-new-benchmark.c | /**
* template.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "../polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is N=1024. */
#include "template-for-new-benchmark.h"
/* Array initialization. */
/* Initialize every element of C to 42.
 * FIX: the original put a second `#pragma omp parallel for` on the INNER
 * loop, spawning a nested parallel region per row, and used
 * firstprivate(j, C, i), needlessly copying the array handle. Parallelize
 * the outer loop only, with j private per thread. */
static
void init_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
  int i, j;
#pragma omp parallel for private(j)
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++)
      C[i][j] = 42;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Print C to stderr so the benchmark harness can diff outputs and defeat
 * dead-code elimination. */
static
void print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
  int i, j;
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]);
      /* NOTE(review): this condition depends only on i, so rows where
         i % 20 == 0 emit a newline after EVERY element and other rows emit
         none; stock PolyBench uses (i * n + j) % 20 == 0 — confirm intent. */
      if (i % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: add 42 to every element of C. The whole
 * function is timed, including call and return.
 * FIX: as in init_array, the nested inner `#pragma omp parallel for`
 * created a parallel region per row and firstprivate(C) copied the array
 * handle for no benefit; one outer parallel loop with private(j) suffices. */
static
void kernel_template(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
  int i, j;
#pragma omp parallel for private(j)
  for (i = 0; i < _PB_N; i++)
    for (j = 0; j < _PB_N; j++)
      C[i][j] += 42;
}
/* PolyBench driver: init, time the kernel, print instruments, and print
 * the live-out array to defeat dead-code elimination. argc/argv are part
 * of the fixed harness signature and are not read here. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n);
  /* Initialize array(s). */
  init_array (n, POLYBENCH_ARRAY(C));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_template (n, POLYBENCH_ARRAY(C));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(C);
  return 0;
}
|
main.c | #include <fcntl.h>
#include <math.h>
#include <omp.h>
#include <stdlib.h> /* malloc/free/realloc/calloc/exit used throughout this file */
#include <sys/param.h>
typedef struct Stack Stack;
typedef struct Queue Queue;
typedef struct Node Node;
/* Singly-linked node carrying an (index, value) pair; used by Queue. */
struct Node {
    Node *next;
    unsigned int i;   /* payload index (e.g. a facet id) */
    double d;         /* payload value (e.g. accumulated throughput) */
};
/* FIFO queue of Nodes; empty when first == NULL. */
struct Queue {
    Node *first;
    Node *last;
};
/* Intrusive stack of indices. NOTE(review): declared but not used in the
   code visible in this file — candidate for removal. */
struct Stack {
    Stack *head;
    unsigned int i;
};
/* Append (i, d) to the tail of queue q.
 * Returns 0 on success, 1 if the node allocation fails. */
int
put(Queue *q,
    const unsigned int i,
    const double d) {
    Node *node = malloc(sizeof(Node));
    if(node == NULL)
        return 1;
    node->i = i;
    node->d = d;
    node->next = NULL;
    if(q->first == NULL)
        q->first = node;      /* queue was empty */
    else
        q->last->next = node; /* link after current tail */
    q->last = node;
    return 0;
}
/* Pop the head of queue q into *i and *d, freeing the node.
 * Returns 0 on success, 1 if the queue is empty. */
int
get(Queue *q,
    unsigned int *i,
    double *d) {
    Node *head = q->first;
    if(head == NULL)
        return 1;
    *i = head->i;
    *d = head->d;
    q->first = head->next;
    free(head);
    return 0;
}
/* Build a node -> incident-simplices map in compressed-row storage.
 * tri: m facets of 3 node ids each; n: number of nodes.
 * Returned layout: net[v] (v < n) is the start offset of node v's simplex
 * list, net[n] is one past the last entry, and the entries themselves
 * follow from offset n+1. Returns NULL on allocation failure. */
unsigned int *
Simplicies(const unsigned int *tri,
           const unsigned int m,
           const unsigned int n) {
    unsigned int *net;
    unsigned int i, j, k, l, o;
    unsigned int *lst[n], ex;
    // node to simplicies map
    // m: number of facets
    // n: number of points
    // alloc list of arrays
    /* per-node growable array: slot 0 = capacity, slot 1 = next free slot
       (real entries start at slot 2) */
    for(i = 0; i < n; i++) {
        lst[i] = malloc(8 * sizeof(unsigned int));
        if(!lst[i])
            return NULL;   /* NOTE(review): earlier lst[] allocations leak here */
        lst[i][0] = 8;
        lst[i][1] = 2;
    }
    l = 0;   /* total number of (node, simplex) entries recorded */
    for(i = 0; i < m; i++) {
        for(j = 0; j < 3; j++) {
            k = tri[i*3+j];
            /* skip if simplex i is already listed for node k */
            ex = 0;
            for(o = 2; o < lst[k][1]; o++) {
                if(lst[k][o] == i) {
                    ex = 1;
                    break;
                }
            }
            if(!ex) {
                lst[k][lst[k][1]++] = i;
                l++;
            }
            /* grow when fewer than two free slots remain */
            if(lst[k][0] < lst[k][1] + 2) {
                lst[k][0] = lst[k][1] + 4;
                /* NOTE(review): the realloc result overwrites lst[k] before
                   the NULL check, losing the old block on failure (leak) */
                lst[k] = realloc(lst[k], lst[k][0] * sizeof(unsigned int));
                if(!lst[k])
                    return NULL;
            }
        }
    }
    // store in compressed row format
    net = malloc((l + n + 1) * sizeof(unsigned int));
    if(!net)
        return NULL;
    j = n + 1;   /* entries begin right after the n+1 header slots */
    for(i = 0; i < n; i++) {
        net[i] = j;
        for(k = 2; k < lst[i][1]; k++) {
            net[j++] = lst[i][k];
        }
        free(lst[i]);
    }
    net[n] = j;   /* end sentinel */
    return net;
}
/* Build the reverse ("upstream") facet flow network in compressed-row
 * storage. spx lists, for each of the m facets, its two downstream facets
 * (the value m marks "flows off the mesh"). The result maps each facet to
 * the facets draining INTO it: net[v] (v < m) is the start offset of facet
 * v's list and net[m] is one past the last entry. *r receives the total
 * length of the returned array. Returns NULL on allocation failure. */
unsigned int *
upstreamnetwork(const unsigned int *spx,
                const unsigned int m, unsigned int *r) {
    unsigned int *net;
    unsigned int i, j, k, l, o;
    unsigned int *lst[m], ex, itr;
    // reverse facet flow network spx
    // alloc list of arrays
    /* per-facet growable array: slot 0 = capacity, slot 1 = next free slot
       (real entries start at slot 2) */
    for(i = 0; i < m; i++) {
        lst[i] = malloc(8 * sizeof(unsigned int));
        if(!lst[i])
            return NULL;   /* NOTE(review): earlier lst[] allocations leak here */
        lst[i][0] = 8;
        lst[i][1] = 2;
    }
    l = 0;   /* total number of reverse edges recorded */
    for(i = 0; i < m; i++) {
        itr = i * 2;
        for(j = 0; j < 2; j++) {
            k = spx[itr + j];
            if(k == m)
                continue;   /* drains off the mesh: no upstream entry */
            /* skip if facet i is already listed as upstream of k */
            ex = 0;
            for(o = 2; o < lst[k][1]; o++) {
                if(lst[k][o] == i) {
                    ex = 1;
                    break;
                }
            }
            if(!ex) {
                lst[k][lst[k][1]++] = i;
                l++;
            }
            /* grow when fewer than two free slots remain */
            if(lst[k][0] < lst[k][1] + 2) {
                lst[k][0] = lst[k][1] + 4;
                /* NOTE(review): realloc overwrites lst[k] before the NULL
                   check, losing the old block on failure (leak) */
                lst[k] = realloc(lst[k], lst[k][0] * sizeof(unsigned int));
                if(!lst[k])
                    return NULL;
            }
        }
    }
    // store in compressed row format
    net = malloc((l + m + 1) * sizeof(unsigned int));
    if(!net)
        return NULL;
    j = m + 1;   /* entries begin right after the m+1 header slots */
    for(i = 0; i < m; i++) {
        net[i] = j;
        for(k = 2; k < lst[i][1]; k++) {
            net[j++] = lst[i][k];
        }
        free(lst[i]);
    }
    net[m] = j;   /* end sentinel */
    *r = j;
    return net;
}
/* Return the simplex shared by nodes a and b that is not x, or m if the
 * two nodes share no such simplex. net is a node->simplices map in
 * compressed-row form: node v's entries live at net[net[v]]..net[net[v+1]]-1. */
unsigned int
SimplexOfNodes(const unsigned int *net,
               const unsigned int a,
               const unsigned int b,
               const unsigned int x,
               const unsigned int m) {
    unsigned int ia, ib;
    for(ia = net[a]; ia < net[a+1]; ia++) {
        const unsigned int s = net[ia];
        if(s == x)
            continue;            /* excluded simplex */
        for(ib = net[b]; ib < net[b+1]; ib++)
            if(net[ib] == s)
                return s;        /* first shared simplex in a's order */
    }
    return m;                    /* sentinel: no shared simplex */
}
unsigned int
NodeOfSimplicies(const unsigned int *tri,
		 const unsigned int a,
		 const unsigned int b,
		 const double *z) {
	// Return the lowest (smallest z) node shared by facets a and b.
	// tri holds three node indices per facet.  Aborts the program
	// when the two facets share no node.
	unsigned int ia, ib, node;
	double zbest;
	int best;
	best = -1;
	zbest = 1E99;
	for(ia = 0; ia < 3; ia++) {
		node = tri[a*3 + ia];
		for(ib = 0; ib < 3; ib++) {
			if(tri[b*3 + ib] != node)
				continue;
			if(z[node] < zbest) {
				zbest = z[node];
				best = node;
			}
		}
	}
	if(best == -1)
		exit(EXIT_FAILURE);
	return best;
}
double
HeronsTriangle(double a, double b, double c) {
double d;
// return the area of a facet
// ! a >= b >= c
if(a < b) {
d = b;
b = a;
a = d;
}
if(b < c) {
d = c;
c = b;
b = d;
}
if(a < b) {
d = b;
b = a;
a = d;
}
return sqrt((a+(b+c))*(c-(a-b))*(c+(a-b))*(a+(b-c))) / 4;
}
void
linkthroughput(double *ltp,
	       const unsigned int *spx,
	       const double *spw,
	       const double *spa,
	       const unsigned int m) {
	// Propagate flow ("link throughput") through the facet network
	// in topological order: a facet is expanded only once all of its
	// upstream inputs have arrived.
	// ltp: out, throughput per link (two links per facet); the first
	//      link slot of each facet doubles as an input accumulator.
	// spx: downstream network, two children per facet (m == no edge).
	// spw: fraction of a facet's throughflow carried by each link.
	// spa: local area contribution added on each link.
	// m:   number of facets.
	// NOTE(review): assumes spx is acyclic — a cycle would leave
	// facets unexpanded; confirm upstream construction guarantees it.
	double ltpi;
	unsigned int i, j, k, l;
	unsigned int *seen, *ideg, itr;
	Queue *que;
	// initialize
	seen = calloc(m, sizeof(unsigned int));
	ideg = calloc(m, sizeof(unsigned int));
	que = malloc(sizeof(Queue));
	if(!que || !ideg || !seen)
		exit(EXIT_FAILURE);
	que->first = que->last = NULL;
	// get in-degree
	for(i = 0; i < m; i++) {
		itr = i * 2;
		for(j = 0; j < 2; j++) {
			k = itr + j;
			l = spx[k];
			if(m > l)
				ideg[l]++;
		}
	}
	// start at facets without in-degree draining into l
	for(i = 0; i < m; i++) {
		if(!ideg[i]) {
			itr = i * 2;
			for(j = 0; j < 2; j++) {
				k = itr + j;
				l = spx[k];
				ltp[k] = spa[k];
				if(m > l)
					if(put(que, l, ltp[k]))
						exit(EXIT_FAILURE);
			}
		}
	}
	// work the queue
	while(!get(que, &i, &ltpi)) {
		seen[i]++;
		itr = i * 2;
		// accumulate this input in facet i's first link slot
		ltp[itr] += ltpi;
		if(seen[i] == ideg[i]) {
			// we collected all input for node i
			ltpi = ltp[itr];
			ltp[itr] = 0;
			for(j = 0; j < 2; j++) {
				k = itr + j;
				l = spx[k];
				// link throughput
				ltp[k] = ltpi * spw[k] + spa[k];
				if(m > l)
					if(put(que, l, ltp[k]))
						exit(EXIT_FAILURE);
			}
		}
	}
	// release scratch memory (previously leaked)
	free(seen);
	free(ideg);
	free(que);
}
void
network(unsigned int *spx, double *spw, double *spa, double *spd,
	double *phi, double *theta,
	const unsigned int *tri,
	const double *x,
	const double *y,
	const double *z,
	const unsigned int m,
	const unsigned int n) {
	// Build the facet-to-facet flow network of a triangulated surface.
	// Outputs, with two link slots (p, q) per facet i:
	//   spx: receiving facet of each link (m means "no receiver")
	//   spw: fraction of the facet's flow carried by each link
	//   spa: facet area assigned to each link
	//   spd: flow width of the facet (edges projected across the flow)
	//   phi: azimuth atan2(dy, dx); theta: slope angle of the facet
	// Inputs: tri (3 node ids per facet), x/y/z node coordinates.
	int sgn;
	double du, dv, dw, a, b, c;
	double xx, yy, slp, frc;
	double dx, dy, dz, dn, s, t;
	double xa, xb, xc, ya, yb, yc;
	double aa, ab, ac, bb, bc;
	double phii, beta;
	unsigned int i, j;
	unsigned int u, v, w, q, p;
	unsigned int *net;
	// m: number of facets
	// n: number of nodes
	// node-to-facet adjacency (compressed rows), used for neighbor lookup
	net = Simplicies(tri, m, n);
	for(i = 0; i < m; i++) {
		// at p, q we store the pos of children
		p = i * 2;
		q = i * 2 + 1;
		// rotate through the three corners u until the tested
		// direction falls inside the corner cone at u
		for(j = 0; j < 3; j++) {
			u = tri[i*3 + j];
			v = tri[i*3 + (j+1)%3];
			w = tri[i*3 + (j+2)%3];
			// grad (dx,dy) of three point plane
			dz = ((x[w]-x[u])*(y[v]-y[u]) - (y[w]-y[u])*(x[v]-x[u]));
			dy = ((z[w]-z[u])*(x[v]-x[u]) - (x[w]-x[u])*(z[v]-z[u])) / dz;
			dx = ((y[w]-y[u])*(z[v]-z[u]) - (z[w]-z[u])*(y[v]-y[u])) / dz;
			// tri sides vs grad
			xa = x[w] - x[u];
			ya = y[w] - y[u];
			xb = x[v] - x[u];
			yb = y[v] - y[u];
			// dot products
			aa = xa*xa + ya*ya;
			ab = xa*xb + ya*yb;
			bb = xb*xb + yb*yb;
			dn = 1. / (aa*bb - ab*ab);
			// test both orientations of the gradient against the
			// corner cone at u (sign convention: see branches below)
			for(sgn = -1; sgn <= 1; sgn += 2) {
				xc = sgn * dx;
				yc = sgn * dy;
				ac = xa*xc + ya*yc;
				bc = xb*xc + yb*yc;
				// (s,t): the direction expressed in the basis
				// of the edges u->w and u->v; both >= 0 means
				// it points between those two edges
				s = (bb*ac - ab*bc) * dn;
				t = (aa*bc - ab*ac) * dn;
				if(s >= 0 && t >= 0) {
					phii = atan2(dy, dx);
					phi[i] = phii;
					theta[i] = atan(sqrt(dx*dx + dy*dy));
					// fold azimuth into [0, pi) for the
					// width projections below
					if(phii < 0)
						phii += M_PI;
					a = sqrt(xa*xa + ya*ya);
					b = sqrt(xb*xb + yb*yb);
					if(sgn > 0) {
						// flow exits through the edge
						// (v,w) opposite corner u: the
						// whole facet drains into the
						// single facet across that edge
						spx[p] = SimplexOfNodes(net, w, v, i, m);
						spx[q] = m;
						spw[p] = 1;
						spw[q] = 0;
						c = sqrt((x[v]-x[w])*(x[v]-x[w])+(y[v]-y[w])*(y[v]-y[w]));
						spa[p] = HeronsTriangle(a, b, c);
						spa[q] = 0;
						beta = atan2(y[w]-y[v], x[w]-x[v]);
						if(beta < 0)
							beta += M_PI;
						beta -= phii;
						if(beta > M_PI / 2)
							beta = M_PI - beta;
						// width: edge (v,w) projected
						// perpendicular to the flow
						spd[i] = c * fabs(sin(beta));
					} else {
						// flow exits through corner u:
						// split it between the facets
						// across edges (u,v) and (u,w),
						// cut at the point (xx,yy) where
						// the flow line meets edge (v,w)
						slp = dy / dx;
						frc = (y[w] - y[v]) / (x[w] - x[v]);
						if(dx) {
							if(x[w] != x[v])
								xx = (yb + x[u]*slp - x[v]*frc) / (slp - frc);
							else
								xx = x[w];
							yy = (xx - x[u])*slp + y[u];
						} else {
							xx = x[u];
							yy = (xx - x[w])*frc + y[w];
						}
						if(isinf(yy)) {
							// degenerate geometry:
							// fall back to a 50/50 split
							fprintf(stderr, "flat triangle %i (u:%.2f v:%.2f w:%.2f)\n", i, z[u], z[v], z[w]);
							spw[p] = 0.5;
							spw[q] = 0.5;
							c = sqrt((x[v]-x[w])*(x[v]-x[w])+(y[v]-y[w])*(y[v]-y[w]));
							spa[p] = HeronsTriangle(a, b, c) / 2.0;
							spa[q] = spa[p];
						} else {
							// split by the areas of the
							// two sub-triangles cut off
							// by the line u -> (xx,yy)
							du = sqrt((xx-x[u])*(xx-x[u])+(yy-y[u])*(yy-y[u]));
							dv = sqrt((xx-x[v])*(xx-x[v])+(yy-y[v])*(yy-y[v]));
							dw = sqrt((xx-x[w])*(xx-x[w])+(yy-y[w])*(yy-y[w]));
							spw[p] = dv / (dv+dw);
							spw[q] = dw / (dv+dw);
							spa[p] = HeronsTriangle(b, dv, du);
							spa[q] = HeronsTriangle(a, dw, du);
						}
						spx[p] = SimplexOfNodes(net, u, v, i, m);
						spx[q] = SimplexOfNodes(net, u, w, i, m);
						// width: both edges meeting at u,
						// projected across the flow
						beta = atan2(yb, xb);
						if(beta < 0)
							beta += M_PI;
						beta -= phii;
						if(beta > M_PI / 2)
							beta = M_PI - beta;
						spd[i] = b * fabs(sin(beta));
						beta = atan2(ya, xa);
						if(beta < 0)
							beta += M_PI;
						beta -= phii;
						if(beta > M_PI / 2)
							beta = M_PI - beta;
						spd[i] += a * fabs(sin(beta));
					}
					// matching corner found: exit both loops
					j = 3;
					break;
				}
			}
		}
	}
	free(net);
}
void
tunnel(unsigned int *spx, double *spw, double *spa,
       const unsigned int *tri,
       const double *x,
       const double *y,
       const double *z,
       const unsigned int m,
       const unsigned int n,
       const double tubemaxdist) {
	// Resolve local sinks in the flow network spx: wherever two
	// neighboring facets flow into each other, reroute the flow to a
	// lower facet nearby, searching at most tubemaxdist BFS steps
	// ("tunneling" through depressions).  spw/spa are adjusted at the
	// end so each facet's remaining links stay consistent.
	// NOTE(review): parameters x and y are currently unused here.
	double zu, zv, dv;
	unsigned int p, q, u, v, w;
	unsigned int i, j, k, l, s, t;
	unsigned int msinks, nsinks, mm;
	unsigned int *net, *seen, *sinks, dst;
	unsigned int *sinku, *uniqu, *udest;
	Queue *que;
	// m: number of facets
	// n: number of points
	// node-to-facet adjacency (compressed rows)
	net = Simplicies(tri, m, n);
	mm = m + m;
	// per-link sink records: sinks = link slot, sinku = lowest node
	sinks = malloc(mm * 2 * sizeof(unsigned int));
	sinku = malloc(mm * 2 * sizeof(unsigned int));
	if(!sinks || !sinku)
		exit(EXIT_FAILURE);
	//#pragma omp parallel for private(i,j,k,l,s,p,q,u,v,zu,zv,dst)
	//we don't want to have this in parallel because we manipulate spx[l*2+k] = dst
	for(i = 0; i < m; i++) {
		p = i * 2;
		for(j = 0; j < 2; j++) {
			q = p + j;
			sinks[q] = mm;	// sentinel: not a sink
			sinku[q] = n;
			l = spx[q];
			if(l == m)
				continue;
			// check whether two neighboring facets flow into each other
			if(spx[l*2] == i || spx[l*2+1] == i) {
				// get lowest node of these two facets
				u = NodeOfSimplicies(tri, i, l, z);
				zu = z[u];
				dst = m;
				// look for an adjacent facet of u whose highest
				// corner is exactly at zu (a pass at that level)
				for(k = net[u]; k < net[u+1]; k++) {
					v = net[k];
					if(v == i || v == l)
						continue;
					// zv = highest corner of facet v
					zv = z[tri[v*3]];
					if(z[tri[v*3+1]] > zv)
						zv = z[tri[v*3+1]];
					if(z[tri[v*3+2]] > zv)
						zv = z[tri[v*3+2]];
					if(zv == zu) {
						dst = v;
						break;
					}
				}
				if(dst < m) {
					spx[q] = dst;
					// rewire also the other facet to that lower facet (l->dest)
					for(k = 0; k < 2; k++)
						if(spx[l*2+k] == i)
							spx[l*2+k] = dst;
				} else {
					// unresolved: remember link and node for
					// the BFS phase below
					sinks[q] = q;
					sinku[q] = u;
				}
			}
		}
	}
	// compact the sink records
	msinks = 0;
	for(i = 0; i < mm; i++) {
		if(sinks[i] < mm) {
			sinks[msinks] = sinks[i];
			sinku[msinks++] = sinku[i];
		}
	}
	sinks = realloc(sinks, msinks * sizeof(unsigned int));
	sinku = realloc(sinku, msinks * sizeof(unsigned int));
	uniqu = malloc(n * sizeof(unsigned int));
	udest = malloc(n * sizeof(unsigned int));
	if(!uniqu || !udest)
		exit(EXIT_FAILURE);
#pragma omp parallel for
	for(i = 0; i < n; i++) {
		uniqu[i] = n;
		udest[i] = m;
	}
	// deduplicate the sink nodes so each is searched only once
	for(i = 0; i < msinks; i++) {
		u = sinku[i];
		uniqu[u] = u;
	}
	nsinks = 0;
	for(i = 0; i < n; i++)
		if(uniqu[i] < n)
			uniqu[nsinks++] = uniqu[i];
	uniqu = realloc(uniqu, nsinks * sizeof(unsigned int));
	// per sink node: breadth-first search over facets (depth dv)
	// for the first facet whose highest corner is below z[u],
	// giving up beyond tubemaxdist steps
#pragma omp parallel for private(i,k,s,t,u,v,w,zu,zv,dv,seen,que) schedule(dynamic, 4)
	for(i = 0; i < nsinks; i++) {
		u = uniqu[i];
		zu = z[u];
		que = malloc(sizeof(Queue));
		seen = calloc(m, sizeof(unsigned int));
		if(!que || !seen)
			exit(EXIT_FAILURE);
		que->first = que->last = NULL;
		// seed with the facets around u
		for(k = net[u]; k < net[u+1]; k++) {
			v = net[k];
			seen[v]++;
			if(put(que, v, 0))
				exit(EXIT_FAILURE);
		}
		while(!get(que, &v, &dv)) {
			if(dv > tubemaxdist) {
				break;
			}
			// zv = highest corner of facet v
			zv = z[tri[v*3]];
			if(z[tri[v*3+1]] > zv)
				zv = z[tri[v*3+1]];
			if(z[tri[v*3+2]] > zv)
				zv = z[tri[v*3+2]];
			if(zv < zu) {
				// found a strictly lower facet: tunnel here
				udest[u] = v;
				break;
			}
			// enqueue all unvisited facets sharing a node with v
			for(s = 0; s < 3; s++) {
				t = tri[v*3+s];
				for(k = net[t]; k < net[t+1]; k++) {
					w = net[k];
					if(seen[w])
						continue;
					seen[w]++;
					if(put(que, w, dv+1))
						exit(EXIT_FAILURE);
				}
			}
		}
		// drain remaining queue entries before freeing
		while(!get(que, &v, &dv));
		free(seen);
		free(que);
	}
	free(uniqu);
	free(net);
	//tend = omp_get_wtime();
	//printf("%.4f\n", tend - tini);
	//tini = tend;
	// apply the BFS results to the recorded sink links
#pragma omp parallel for private(i,q,u)
	for(i = 0; i < msinks; i++) {
		q = sinks[i];
		u = sinku[i];
		spx[q] = udest[u];
	}
	free(udest);
	free(sinks);
	free(sinku);
	// fix spw and spa
	// when one of a facet's two links was removed, fold its weight
	// and area into the surviving link
#pragma omp parallel for private(i,p)
	for(i = 0; i < m; i++) {
		p = i * 2;
		if(spx[p] == m && spx[p+1] < m) {
			spa[p+1] += spa[p];
			spw[p+1] = 1;
			spw[p] = 0;
		} else if(spx[p] < m && spx[p+1] == m) {
			spa[p] += spa[p+1];
			spw[p] = 1;
			spw[p+1] = 0;
		}
	}
}
void
rivers(unsigned int *ind, const double *sca,
       const unsigned int *net, const unsigned int *rev,
       const unsigned int m, const double fac) {
	// Grow the river mask ind upstream: starting from the facets
	// already flagged in ind, flag every upstream facet (via the
	// reverse network rev) whose flow accumulation sca is at least
	// fac times that of the facet it drains into.
	// NOTE(review): the forward network net is currently unused here.
	unsigned int cur, up, pos;
	double dist;
	Queue *work;
	work = malloc(sizeof(Queue));
	if(!work)
		exit(EXIT_FAILURE);
	work->first = work->last = NULL;
	// seed the queue with every facet already marked as river
	for(cur = 0; cur < m; cur++)
		if(ind[cur] && put(work, cur, 0))
			exit(EXIT_FAILURE);
	// breadth-first expansion along the upstream links
	while(!get(work, &cur, &dist)) {
		for(pos = rev[cur]; pos < rev[cur+1]; pos++) {
			up = rev[pos];
			if(ind[up])
				continue;
			if(sca[up] < sca[cur]*fac)
				continue;
			ind[up] = 1;
			if(put(work, up, 0))
				exit(EXIT_FAILURE);
		}
	}
	free(work);
}
void
convergence(double *conv, const double *sca,
	    const unsigned int *net, const unsigned int *rev,
	    const unsigned int *sub, const unsigned int slen,
	    const unsigned int m, const unsigned int nsamples) {
	// For each facet i in sub, collect a window of nsamples facets
	// downstream (via net) and nsamples upstream (via rev), staying
	// inside the sub subset, and set conv[i] to the mean pairwise
	// |sca| difference BETWEEN the two windows minus the mean
	// pairwise |sca| difference WITHIN them.  NAN when either window
	// cannot be filled.
	// Fixes: abs() on doubles truncated to int — now fabs();
	// the mask array was never freed.
	unsigned int i, j, k, l, s, t, d;
	double qoff, qdia, sum, wgh, ak, aj, nl;
	Queue *que;
	unsigned int *dtr, *utr, *seen, *mask;
	mask = calloc(m, sizeof(unsigned int));
	if(!mask)
		exit(EXIT_FAILURE);
	// mask = 1 for facets OUTSIDE sub, so searches skip them
#pragma omp parallel for private(s)
	for(s = 0; s < slen; s++)
		mask[sub[s]] = 1;
#pragma omp parallel for private(i)
	for(i = 0; i < m; i++) {
		if(mask[i])
			mask[i] = 0;
		else
			mask[i] = 1;
	}
#pragma omp parallel for private(i,j,k,l,s,t,d,ak,aj,qoff,qdia,sum,wgh,nl,que,dtr,utr,seen)
	for(s = 0; s < slen; s++) {
		// downstream window
		i = sub[s];
		seen = malloc(m * sizeof(unsigned int));
		que = malloc(sizeof(Queue));
		dtr = malloc(nsamples * sizeof(unsigned int));
		if(!que || !seen || !dtr)
			exit(EXIT_FAILURE);
		memcpy(seen, mask, m * sizeof(unsigned int));
		que->first = que->last = NULL;
		if(put(que, i, 0))
			exit(EXIT_FAILURE);
		seen[i] = 1;
		d = 0;
		while(!get(que, &j, &nl)) {
			for(l = 0; l < 2; l++) {
				k = net[l+j*2];
				if(k == m)
					continue;
				if(seen[k])
					continue;
				seen[k] = 1;
				if(put(que, k, 0))
					exit(EXIT_FAILURE);
				dtr[d++] = k;
				if(d == nsamples) {
					// window full: drain the queue and stop
					while(!get(que, &j, &nl));
					break;
				}
			}
		}
		free(que);
		free(seen);
		if(d < nsamples) {
			conv[i] = NAN;
			free(dtr);
			continue;
		}
		// upstream window
		seen = malloc(m * sizeof(unsigned int));
		que = malloc(sizeof(Queue));
		utr = malloc(nsamples * sizeof(unsigned int));
		if(!que || !seen || !utr)
			exit(EXIT_FAILURE);
		memcpy(seen, mask, m * sizeof(unsigned int));
		que->first = que->last = NULL;
		if(put(que, i, 0))
			exit(EXIT_FAILURE);
		seen[i] = 1;
		d = 0;
		while(!get(que, &j, &nl)) {
			for(l = rev[j]; l < rev[j+1]; l++) {
				k = rev[l];
				if(seen[k])
					continue;
				seen[k] = 1;
				if(put(que, k, 0))
					exit(EXIT_FAILURE);
				utr[d++] = k;
				if(d == nsamples) {
					while(!get(que, &j, &nl));
					break;
				}
			}
		}
		free(que);
		free(seen);
		if(d < nsamples) {
			conv[i] = NAN;
			free(dtr);
			free(utr);
			continue;
		}
		// mean pairwise difference WITHIN each window
		sum = 0;
		wgh = 0;
		for(l = 1; l < d; l++) {
			ak = sca[dtr[l]];
			for(t = 0; t < l; t++) {
				aj = sca[dtr[t]];
				// fabs, not abs: abs() truncates doubles to int
				sum += fabs(ak - aj);
				wgh += 1;
			}
		}
		for(l = 1; l < d; l++) {
			ak = sca[utr[l]];
			for(t = 0; t < l; t++) {
				aj = sca[utr[t]];
				sum += fabs(ak - aj);
				wgh += 1;
			}
		}
		qdia = sum / wgh;
		// mean pairwise difference BETWEEN the two windows
		sum = 0;
		wgh = 0;
		for(l = 0; l < d; l++) {
			ak = sca[utr[l]];
			for(t = 0; t < d; t++) {
				aj = sca[dtr[t]];
				sum += fabs(ak - aj);
				wgh += 1;
			}
		}
		qoff = sum / wgh;
		conv[i] = qoff - qdia;
		free(dtr);
		free(utr);
	}
	// release the subset mask (previously leaked)
	free(mask);
}
|
main_decades_biscuit.h | int main(int argc, char** argv) {
  // Driver: load a bipartite graph from the file given on the command
  // line, run the DECADES projection kernel on OpenMP thread 0 while
  // thread 1 services the "biscuit" runtime, then print the elapsed
  // time and a checksum of the projection.
  char *x_to_y_fname;
  bgraph x_to_y_bgraph;
  weight_type *y_project;  // projection accumulator, one slot per entry
  assert(argc == 2);       // usage: <prog> <bgraph file>
  x_to_y_fname = argv[1];
  x_to_y_bgraph = parse_bgraph(x_to_y_fname);
  // zero-initialized output buffer sized to the projection
  y_project = (weight_type*) calloc(get_projection_size(x_to_y_bgraph), sizeof(weight_type));
  assert(y_project);
  preprocess(x_to_y_bgraph, y_project);
  decades_is_init();
  auto start = std::chrono::system_clock::now();
  printf("Running kernel\n");
  // pin exactly two threads: one compute, one biscuit service thread
  omp_set_dynamic(0);
  omp_set_num_threads(2);
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    if (tid == 0) {
      _kernel_(x_to_y_bgraph, y_project, tid, 16);
    } else if (tid == 1) {
      _kernel_biscuit(0);
    }
  }
  auto end = std::chrono::system_clock::now();
  std::chrono::duration<double> elapsed_seconds = end-start;
  std::cout << "Elapsed time: " << elapsed_seconds.count() << "s\n";
  // sum the projection as a cheap result hash for validation
  double total = 0.0;
  for (int i = 0; i < get_projection_size(x_to_y_bgraph); i++) {
    total += y_project[i];
  }
  printf("Finished hash: %.3f\n", total);
  clean_bgraph(x_to_y_bgraph);
  free(y_project);
  print_is_info();
  // NOTE(review): returns 1 (conventionally failure) even on success —
  // confirm callers/scripts expect this exit code
  return 1;
}
|
dbatch.c | #include "kog.h"
#include "mem.h"
#ifdef USE_MSR
#include "msr.h"
#endif /* USE_MSR */
#include "wre.h"
int main(int argc, char *argv[])
{
	// Batch driver for the vectorized 2x2 SVD kernel d8svd2_: read b
	// batches of n-element A11/A21/A12/A22 blocks from infile,
	// decompose them in parallel, reduce the per-element error
	// measures to their maxima, and print one CSV row per batch.
	// With USE_MSR, per-thread MPERF/APERF counters are sampled
	// around the kernel and logged to dmsr.csv.
	if (4 != argc) {
		(void)fprintf(stderr, "%s n #batches infile\n", argv[0]);
		return EXIT_FAILURE;
	}
	// n: batch length (number of 2x2 problems)
	const size_t n = atoz(argv[1]);
	if (!n) {
		perror("atoz(n)");
		return EXIT_FAILURE;
	}
	// TODO: FIXME for n not a power of two
	// popcount > 1 <=> more than one bit set <=> not a power of two
	if (_mm_popcnt_u64(n) > (__int64)1) {
		perror("n not a power of two");
		return EXIT_FAILURE;
	}
	// b: number of batches to read from the input file
	const size_t b = atoz(argv[2]);
	if (!b) {
		perror("atoz(b)");
		return EXIT_FAILURE;
	}
	FILE *const f = fopen(argv[3], "rb");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	// d: input/output matrix storage; t: per-element error outputs
	Dmem *const d = Dalloc(n);
	if (!d)
		return EXIT_FAILURE;
	Tout *const t = Talloc(n);
	if (!t)
		return EXIT_FAILURE;
	// CSV header for the per-batch results
	(void)fprintf(stdout, "\"DBATCH\",\"WTIMEs\",\"K2\",\"RE\",\"OU\",\"OV\"");
#ifdef USE_MSR
	(void)fprintf(stdout, ",\"A_M\"");
#endif /* USE_MSR */
	(void)fprintf(stdout, "\n");
	(void)fflush(stdout);
	// V: number of SIMD vectors covering the batch
	const size_t V = n2V(n);
#ifdef USE_MSR
	const size_t mt = (size_t)omp_get_max_threads();
	FILE *const mf = fopen("dmsr.csv", "w");
	if (!mf) {
		perror("fopen(dmsr.csv)");
		return EXIT_FAILURE;
	}
	(void)fprintf(mf, "\"DBATCH\",\"TIX\",\"MPERF\",\"APERF\"\n");
	(void)fflush(mf);
#endif /* USE_MSR */
	for (size_t j = (size_t)1u; j <= b; ++j) {
		// read one batch: four n-long double arrays, one per
		// 2x2 matrix entry
		if (n != fread(d->r.A11, sizeof(double), n, f)) {
			perror("fread(A11r)");
			return EXIT_FAILURE;
		}
		if (n != fread(d->r.A21, sizeof(double), n, f)) {
			perror("fread(A21r)");
			return EXIT_FAILURE;
		}
		if (n != fread(d->r.A12, sizeof(double), n, f)) {
			perror("fread(A12r)");
			return EXIT_FAILURE;
		}
		if (n != fread(d->r.A22, sizeof(double), n, f)) {
			perror("fread(A22r)");
			return EXIT_FAILURE;
		}
		double
#ifdef USE_MSR
		avg = 0.0,
#endif /* USE_MSR */
		w = omp_get_wtime();
#ifdef _OPENMP
#ifdef USE_MSR
#pragma omp parallel default(none) shared(j,V,d,mf) reduction(+:avg)
#else /* !USE_MSR */
#pragma omp parallel default(none) shared(V,d)
#endif /* ?USE_MSR */
		{
#endif /* _OPENMP */
#ifdef USE_MSR
			// snapshot this thread's MPERF/APERF before the kernel
			const int tix = omp_get_thread_num();
			const int cfd = msr_open(msr_mycpu());
			uint64_t aperf = UINT64_C(0), mperf = UINT64_C(0);
			if (cfd >= 0) {
				(void)msr_read(cfd, IA32_MPERF, &mperf);
				(void)msr_read(cfd, IA32_APERF, &aperf);
			}
#endif /* USE_MSR */
#ifdef _OPENMP
#pragma omp for
#endif /* _OPENMP */
			// one d8svd2_ call per SIMD vector of 2x2 problems
			for (size_t i = (size_t)0u; i < V; ++i) {
				const size_t k = (i << VLlg);
				d8svd2_
					((d->r.A11 + k), (d->r.A21 + k), (d->r.A12 + k), (d->r.A22 + k),
					 (d->r.U11 + k), (d->r.U21 + k), (d->r.U12 + k), (d->r.U22 + k),
					 (d->r.V11 + k), (d->r.V21 + k), (d->r.V12 + k), (d->r.V22 + k),
					 (d->v.S1 + k), (d->v.S2 + k), (d->v.s + k));
			}
#ifdef USE_MSR
			// counter deltas; avg collects the APERF/MPERF ratio
			if (cfd >= 0) {
				uint64_t mval = UINT64_C(0), aval = UINT64_C(0);
				(void)msr_read(cfd, IA32_MPERF, &mval);
				(void)msr_read(cfd, IA32_APERF, &aval);
				(void)msr_close(cfd);
				mperf = ((mval > mperf) ? (mval - mperf) : UINT64_C(0));
				aperf = ((aval > aperf) ? (aval - aperf) : UINT64_C(0));
				avg = ((mperf && aperf && (mperf > aperf)) ? (((double)aperf) / mperf) : 1.0);
			}
			else
				avg = 1.0;
#pragma omp critical
			{
				// NOTE(review): %lu for uint64_t assumes
				// 64-bit long; PRIu64 would be portable
				(void)fprintf(mf, "%zu,%d,%lu,%lu\n", j, tix, mperf, aperf);
				(void)fflush(mf);
			}
#endif /* USE_MSR */
#ifdef _OPENMP
		}
#endif /* _OPENMP */
		w = omp_get_wtime() - w;
#ifdef USE_MSR
		avg /= mt;
#endif /* USE_MSR */
		// compute per-element error measures for this batch
		wdre
			(n, t->K2, t->RE, t->OU, t->OV,
			 d->r.A11, d->r.A21, d->r.A12, d->r.A22,
			 d->r.U11, d->r.U21, d->r.U12, d->r.U22,
			 d->r.V11, d->r.V21, d->r.V12, d->r.V22,
			 d->v.S1, d->v.S2, d->v.s);
		// TODO: FIXME for n not a power of two
		// tree reduction: fold the arrays pairwise so the maxima
		// end up in element 0 (relies on n being a power of two)
		for (size_t k = n >> 1u; k; k >>= 1u) {
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(k,t)
#endif /* _OPENMP */
			for (size_t i = (size_t)0u; i < k; ++i) {
				(t->K2)[i] = fmaxw((t->K2)[i], (t->K2)[i + k]);
				(t->RE)[i] = fmaxw((t->RE)[i], (t->RE)[i + k]);
				(t->OU)[i] = fmaxw((t->OU)[i], (t->OU)[i + k]);
				(t->OV)[i] = fmaxw((t->OV)[i], (t->OV)[i + k]);
			}
		}
		// emit the CSV row for batch j
		(void)Bwre(stdout, j, w, *(t->K2), *(t->RE), *(t->OU), *(t->OV),
#ifdef USE_MSR
			   &avg
#else /* !USE_MSR */
			   (const double*)NULL
#endif /* ?USE_MSR */
			   );
	}
#ifdef USE_MSR
	(void)fclose(mf);
#endif /* USE_MSR */
	(void)Tfree(t);
	(void)Dfree(d);
	return (fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS);
}
|
stencil_par.c | /*******************************************************************
NAME: Stencil
PURPOSE: This program tests the efficiency with which a space-invariant,
linear, symmetric filter (stencil) can be applied to a square
grid or image.
USAGE: The program takes as input the linear
dimension of the grid, and the number of iterations on the grid
<progname> <iterations> <grid size>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
HISTORY: This program is based on the stencil program from the Parallel
Reserach kernels. To learn more, go to
https://github.com/ParRes/Kernels
**********************************************************************************/
#include <stdio.h>
#ifdef APPLE
#include <stdlib.h>
#endif
#include <sys/time.h>
#include <omp.h>
#define USEC_TO_SEC 1.0e-6 /* to convert microsecs to secs */
#define DEF_SIZE 2000
#define RADIUS 8
//#define RESTRICT restrict
#define RESTRICT
#define EPSILON 1.0e-8
#define COEFX 1.0
#define COEFY 1.0
#define EXIT_FAIL 66
#define EXIT_SUCCESS 0
/* define shorthand for indexing a multi-dimensional array */
#define IN(i,j) in[i+(j)*(n)]
#define OUT(i,j) out[i+(j)*(n)]
#define WEIGHT(ii,jj) weight[ii+RADIUS][jj+RADIUS]
#define ABS(x) ((x)>0 ? (x) : (-(x)))
double wtime() {
  /* Wall-clock time in seconds since the epoch, with microsecond
     resolution, via gettimeofday(). */
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (double) tv.tv_sec + (double) tv.tv_usec * 1.0e-6;
}
int main(int argc, char ** argv) {
  /* Stencil benchmark driver: apply a compact (2*RADIUS+1)^2
     divergence stencil to an n x n grid for `iterations` timed sweeps
     (plus one warmup), validate the L1 norm against the analytic
     reference, and report MFlops/s.
     Fixes: n is `long`, so it must be parsed with atol and printed
     with %ld (printing a long with %d is undefined behavior). */
  long n;               /* linear grid dimension */
  int i, j, ii, jj, iter;/* dummies */
  double norm = 0.0,    /* L1 norm of solution */
        reference_norm;
  double f_active_points; /* interior of grid with respect to stencil */
  double flops;         /* floating point ops per iteration */
  int iterations=25;    /* number of times to run the algorithm */
  double stencil_time,  /* timing parameters */
        avgtime;
  int stencil_size;     /* number of points in stencil */
  double * RESTRICT in; /* input grid values */
  double * RESTRICT out;/* output grid values */
  long total_length;    /* total required length to store grid values */
  double weight[2*RADIUS+1][2*RADIUS+1]; /* weights of points in the stencil */
  if(argc ==2){
    n = atol(argv[1]);  /* atol, not atoi: n is long */
  }
  else{
    n = DEF_SIZE;
  }
  if (2*RADIUS +1 > n) {
    printf("ERROR: Stencil radius %d exceeds grid size %ld\n", RADIUS, n);
    exit(EXIT_FAIL);
  }
  /* allocate the required space */
  total_length = n*n*sizeof(double);
  in = (double *) malloc(total_length);
  out = (double *) malloc(total_length);
  if (!in || !out) {
    printf("ERROR: could not allocate space for input or output array\n");
    exit(EXIT_FAIL);
  }
  /* fill the stencil weights to reflect a discrete divergence operator */
  stencil_size = (2*RADIUS+1)*(2*RADIUS+1);
  for (jj=-RADIUS; jj<= RADIUS; jj++) for (ii=-RADIUS; ii<= RADIUS; ii++)
    WEIGHT(ii,jj)=0.0;
  for (jj=1; jj<=RADIUS; jj++) {
    for (ii=-jj+1; ii<jj; ii++) {
      WEIGHT(ii,jj)  =  (double) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS));
      WEIGHT(ii,-jj) = -(double) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS));
      WEIGHT(jj,ii)  =  (double) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS));
      WEIGHT(-jj,ii) = -(double) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS));
    }
    WEIGHT(jj,jj)   =  (double) (1.0/(4.0*jj*RADIUS));
    WEIGHT(-jj,-jj) = -(double) (1.0/(4.0*jj*RADIUS));
  }
  /* interior points actually updated by the stencil */
  f_active_points = (double) (n-2*RADIUS)*(double) (n-2*RADIUS);
  printf("Serial stencil execution on 2D grid\n");
  printf("Grid size            = %ld\n", n);
  printf("Radius of stencil    = %d\n", RADIUS);
  printf("Type of stencil      = compact\n");
  printf("Number of iterations = %d\n", iterations);
  /* intialize the input and output arrays */
  #pragma omp parallel for collapse(2) private(i,j)
  for (j=0; j<n; j++) for (i=0; i<n; i++)
    IN(i,j) = COEFX*i+COEFY*j;
  #pragma omp parallel for collapse(2) private(i,j)
  for (j=RADIUS; j<n-RADIUS; j++) for (i=RADIUS; i<n-RADIUS; i++)
    OUT(i,j) = 0.0;
  for (iter = 0; iter<=iterations; iter++){
    /* start timer after a warmup iteration */
    if (iter == 1) stencil_time = wtime();
    /* Apply the stencil operator */
    #pragma omp parallel for collapse(2) private(ii,jj,i,j)
    for (j=RADIUS; j<n-RADIUS; j++) {
      for (i=RADIUS; i<n-RADIUS; i++) {
        /* would like to be able to unroll this loop, but compiler will ignore */
        for (jj=-RADIUS; jj<=RADIUS; jj++)
          for (ii=-RADIUS; ii<=RADIUS; ii++)  OUT(i,j) += WEIGHT(ii,jj)*IN(i+ii,j+jj);
      }
    }
    /* add constant to solution to force refresh of input data */
    #pragma omp parallel for collapse(2) private(i,j)
    for (j=0; j<n; j++) for (i=0; i<n; i++) IN(i,j)+= 1.0;
  } /* end of iterations */
  stencil_time = wtime() - stencil_time;
  /* compute L1 norm over the interior */
  for (j=RADIUS; j<n-RADIUS; j++) for (i=RADIUS; i<n-RADIUS; i++) {
    norm += (double)ABS(OUT(i,j));
  }
  norm /= f_active_points;
  /******************************************************************************
  ** Analyze and output results.
  *******************************************************************************/
  /* verify correctness: each sweep adds COEFX+COEFY to the norm */
  reference_norm = (double) (iterations+1) * (COEFX + COEFY);
  if (ABS(norm-reference_norm) > EPSILON) {
    printf("ERROR: L1 norm = %lf, Reference L1 norm = %lf\n",
           norm, reference_norm);
    exit(EXIT_FAIL);
  }
  else {
    printf("Solution validates\n");
  }
  flops = (double) (2*stencil_size+1) * f_active_points;
  avgtime = stencil_time/iterations;
  printf("Parallel Rate (MFlops/s): %lf  Avg time (s): %lf\n",
         1.0E-06 * flops/avgtime, avgtime);
  exit(EXIT_SUCCESS);
}
|
trmv_x_sky_n_lo.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_SKY *A,
                                      const ALPHA_Number *x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number *y)
{
    // y := beta*y + alpha*A*x for a square matrix stored in skyline
    // (SKY) format, rows processed in parallel with OpenMP.
    // A->pointers[r]..A->pointers[r+1] delimit row r's stored band;
    // the band's last element sits on the diagonal, so its first
    // element lies in column r - (row_end - row_start) + 1.
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    const ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        // scale y by beta first; rows are independent afterwards
        alpha_mul(y[i], beta, y[i]);
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT r = 0; r < m; ++r)
    {
        const ALPHA_INT row_start = A->pointers[r];
        const ALPHA_INT row_end = A->pointers[r + 1];
        // first stored column of this row (loop-invariant; previously
        // recomputed for every element of the band)
        ALPHA_INT c = r - (row_end - row_start) + 1;
        for(ALPHA_INT i = row_start; i < row_end; i++)
        {
            ALPHA_Number t;
            alpha_mul(t, alpha, A->values[i]);
            alpha_madde(y[r], t, x[c]);
            c++;
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_SKY *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    // Public entry point for the SKY-format triangular matrix-vector
    // product; currently always dispatches to the OpenMP variant.
    return ONAME_omp(alpha, A, x, beta, y);
}
|
hello_world.c | #include<stdio.h>
#include<omp.h>
int main() {
	// Launch four OpenMP threads; each reports its own thread id.
	// Output order is nondeterministic.
	omp_set_num_threads(4);
#pragma omp parallel
	{
		const int tid = omp_get_thread_num();
		printf("%d\n", tid);
	}
	return 0;
} |
5298.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) {
  // Polybench heat-3d stencil, loop-tiled by the CHILL compiler.
  // Each time step performs a 7-point axis-aligned update A -> B and
  // then the symmetric update B -> A, over the interior 1..n-2.
  // Tiling: 8 along the first axis (t4/t6), 64 along the second
  // (t8/t10), 16 along the third (t12/t14); the first tile loop is
  // OpenMP-parallel.
  // NOTE(review): the time loop is hard-coded to 500 iterations and
  // the tsteps parameter is unused — confirm against the original
  // untiled kernel.
  int t14;
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  for (t2 = 1; t2 <= 500; t2 += 1) {
    // first half-step: read A, write B
#pragma omp parallel for private(t4,t6,t8,t10,t12,t14)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 64)
          for (t10 = t8; t10 <= (n - 2 < t8 + 63 ? n - 2 : t8 + 63); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                B[t6][t10][t14] = 0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) + 0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) + 0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) + A[t6][t10][t14];
    // second half-step: read B, write A
#pragma omp parallel for private(t4,t6,t8,t10,t12,t14)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 64)
          for (t10 = t8; t10 <= (n - 2 < t8 + 63 ? n - 2 : t8 + 63); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                A[t6][t10][t14] = 0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) + 0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) + 0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) + B[t6][t10][t14];
  }
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Compute *result = *x - *y, normalized so tv_usec is positive.
	 * NOTE: *y is used as scratch and modified during the carry.
	 * Returns 1 if the difference is negative, otherwise 0. */
	if (x->tv_usec < y->tv_usec)
	{
		int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * carry;
		y->tv_sec += carry;
	}
	if (x->tv_usec - y->tv_usec > 1000000)
	{
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}
	/* Both fields are now directly subtractable; tv_usec >= 0. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1,2),ceild(8*t2-Nz+5,8));t3<=min(floord(4*Nt+Ny-9,8),floord(4*t1+Ny-1,8));t3++) {
for (t4=max(max(ceild(t1-6,8),ceild(8*t2-Nz-19,32)),ceild(8*t3-Ny-19,32));t4<=min(min(floord(4*Nt+Nx-9,32),floord(4*t1+Nx-1,32)),floord(8*t3+Nx-5,32));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),t1);t5<=min(min(min(2*t3,Nt-1),t1+1),8*t4+6);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(32*t4,4*t5+4);
ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
DRB053-inneronly1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example with loop-carried data dependence at the outer level loop.
But the inner level loop can be parallelized.
*/
#include <stdio.h>
#include <string.h>
/* DataRaceBench DRB053: outer i loop of the reduction carries a dependence,
 * but each inner j loop is independent and safely parallelized.
 * This is the intended race-free configuration of the benchmark. */
int main(int argc,char *argv[])
{
/* Loop indices; declared up front (C89 style) and listed in private() below. */
int i;
int j;
double a[20][20];
/* Zero the whole matrix before use. */
memset(a,0,(sizeof(a)));
/* Initialization: every a[i][j] gets a unique value. The two stacked
 * `parallel for` pragmas form a NESTED parallel region; with nesting
 * disabled (the OpenMP default) the inner region runs on one thread. */
#pragma omp parallel for private(i, j)
for (i = 0; i < 20; i++)
#pragma omp parallel for private(j)
for (j = 0; j < 20; j++)
a[i][j] = i * 20 + j;
/* a[i][j] += a[i+1][j] reads row i+1 while updating row i, a loop-carried
 * dependence across the OUTER i loop — so only the inner j loop is
 * parallelized, which is race-free. */
for (i = 0; i < 20 -1; i += 1) {
#pragma omp parallel for private(j)
for (j = 0; j < 20; j += 1) {
a[i][j] += a[i + 1][j];
}
}
/* Print the result matrix row-major, one value per line. */
for (i = 0; i < 20; i++)
for (j = 0; j < 20; j++)
printf("%lf\n",a[i][j]);
return 0;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for two `struct timeval`s.
 * Side effect: *y is normalized in place (borrow/carry applied), exactly as
 * in the classic glibc manual example this is adapted from.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y when x's microsecond field is smaller. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry surplus microseconds from y into whole seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Sign of the difference is decided by the (normalized) seconds alone. */
    return x->tv_sec < y->tv_sec;
}
/* Driver for a tiled (PLUTO/CLooG-generated) order-1 3D 7-point stencil.
 * Usage: prog Nx Ny Nz Nt — grid size (each padded by 2 for the halo) and
 * number of time steps. Runs the kernel TESTS times and reports per-run and
 * minimum wall-clock time.
 * NOTE(review): Nx/Ny/Nz (when argc <= 3) and Nt (when argc <= 4) are left
 * uninitialized if too few arguments are given — confirm invocation. */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* A[2][Nz][Ny][Nx]: double-buffered grid; time-step parity picks the buffer. */
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
/* Tile sizes (t, z, y, x) used by the generated loop nest below; -1 terminates. */
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 16;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
/* Stencil coefficients: center weight alpha, neighbor weight beta. */
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
/* Fixed seed so every run (and TEST iteration) starts from the same grid. */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Timed region: each TEST iteration re-runs the full Nt-step sweep. */
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* The comment block below is glibc boilerplate inlined verbatim by the
 * source-to-source code generator; it documents nothing in this file. */
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
/* t1..t8: tile and point coordinates of the time-skewed loop nest. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Generated tiled sweep: t1 iterates time tiles, t2 (parallel) z tiles,
 * t3 y tiles, t4 x tiles; t5 is the time step, t6/t7/t8 the z/y/x points.
 * The bounds implement time skewing so dependences stay within legal order. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(8*t1+Ny+13,16)),floord(16*t2+Ny+12,16)),floord(16*t1-16*t2+Nz+Ny+11,16));t3++) {
for (t4=max(max(max(0,ceild(t1-7,8)),ceild(16*t2-Nz-60,64)),ceild(16*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(8*t1+Nx+13,64)),floord(16*t2+Nx+12,64)),floord(16*t3+Nx+12,64)),floord(16*t1-16*t2+Nz+Nx+11,64));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),16*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),16*t3+14),64*t4+62),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
star2d3r.c | #define BENCH_DIM 2
#define BENCH_FPP 25
#define BENCH_RAD 3
#include "common.h"
/* Runs `timestep` sweeps of a radius-3 2D star (13-point) stencil over a
 * double-buffered grid A1 of dimsize x dimsize elements (compsize plus a
 * BENCH_RAD halo on each side), and returns the elapsed wall-clock time.
 * `scop` selects the #pragma scop region (for polyhedral source-to-source
 * tools) versus the OpenMP reference loop nest; both compute the same
 * update, writing buffer (t+1)%2 from buffer t%2. */
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
/* View the flat buffer as A[2][dimsize][dimsize] via a VLA pointer cast. */
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
#pragma scop
for (int t = 0; t < timestep; t++)
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.06251f * A[t%2][i-3][j] + 0.06255f * A[t%2][i-2][j] + 0.06245f * A[t%2][i-1][j] +
0.06252f * A[t%2][i][j-3] + 0.06249f * A[t%2][i][j-2] + 0.06244f * A[t%2][i][j-1] +
0.25002f * A[t%2][i][j] + 0.06248f * A[t%2][i][j+1] + 0.06243f * A[t%2][i][j+2] +
0.06253f * A[t%2][i][j+3] + 0.06246f * A[t%2][i+1][j] + 0.06242f * A[t%2][i+2][j] +
0.06254f * A[t%2][i+3][j];
#pragma endscop
}
else {
/* Reference version: parallelize over rows; iterations are independent
 * within a time step because reads and writes hit different buffers. */
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.06251f * A[t%2][i-3][j] + 0.06255f * A[t%2][i-2][j] + 0.06245f * A[t%2][i-1][j] +
0.06252f * A[t%2][i][j-3] + 0.06249f * A[t%2][i][j-2] + 0.06244f * A[t%2][i][j-1] +
0.25002f * A[t%2][i][j] + 0.06248f * A[t%2][i][j+1] + 0.06243f * A[t%2][i][j+2] +
0.06253f * A[t%2][i][j+3] + 0.06246f * A[t%2][i+1][j] + 0.06242f * A[t%2][i+2][j] +
0.06254f * A[t%2][i+3][j];
}
/* end_time is never assigned above, so this returns sb_time() - start_time. */
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
divsufsort.c.inc.h | /*
* divsufsort.c for libdivsufsort
* Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "divsufsort_private.h"
#ifdef _OPENMP
# include <omp.h>
#endif
/*- Private Functions -*/
/* Sorts suffixes of type B*. */
/* Sorts all type B* suffixes of T[0..n-1].
 * On return SA[0..m-1] holds the sorted order of the m type B* suffixes,
 * and bucket_A/bucket_B hold per-character(-pair) boundaries consumed later
 * by construct_SA()/construct_BWT(). Returns m (the B* suffix count). */
static
saidx_t
sort_typeBstar(const sauchar_t *T, saidx_t *SA,
saidx_t *bucket_A, saidx_t *bucket_B,
saidx_t n) {
saidx_t *PAb, *ISAb, *buf;
#ifdef _OPENMP
saidx_t *curbuf;
saidx_t l;
#endif
saidx_t i, j, k, t, m, bufsize;
saint_t c0, c1;
#ifdef _OPENMP
saint_t d0, d1;
int tmp;
#endif
/* Initialize bucket arrays. */
for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
/* Count the number of occurrences of the first one or two characters of each
type A, B and B* suffix. Moreover, store the beginning position of all
type B* suffixes into the array SA. */
for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
/* type A suffix. */
do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
if(0 <= i) {
/* type B* suffix. */
++BUCKET_BSTAR(c0, c1);
SA[--m] = i;
/* type B suffix. */
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
++BUCKET_B(c0, c1);
}
}
}
m = n - m;  /* m = number of type B* suffixes collected above */
/*
note:
A type B* suffix is lexicographically smaller than a type B suffix that
begins with the same first two characters.
*/
/* Calculate the index of start/end point of each bucket. */
for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
t = i + BUCKET_A(c0);
BUCKET_A(c0) = i + j; /* start point */
i = t + BUCKET_B(c0, c0);
for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
j += BUCKET_BSTAR(c0, c1);
BUCKET_BSTAR(c0, c1) = j; /* end point */
i += BUCKET_B(c0, c1);
}
}
if(0 < m) {
/* Sort the type B* suffixes by their first two characters. */
PAb = SA + n - m; ISAb = SA + m;
for(i = m - 2; 0 <= i; --i) {
t = PAb[i], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = i;
}
t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
/* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
/* Parallel variant: worker threads repeatedly claim a bucket range
[k, l) under the critical section and sssort() it with a per-thread
slice of the shared scratch buffer. */
tmp = omp_get_max_threads();
buf = SA + m, bufsize = (n - (2 * m)) / tmp;
c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
{
tmp = omp_get_thread_num();
curbuf = buf + tmp * bufsize;
k = 0;
for(;;) {
#pragma omp critical(sssort_lock)
{
if(0 < (l = j)) {
d0 = c0, d1 = c1;
do {
k = BUCKET_BSTAR(d0, d1);
if(--d1 <= d0) {
d1 = ALPHABET_SIZE - 1;
if(--d0 < 0) { break; }
}
} while(((l - k) <= 1) && (0 < (l = k)));
c0 = d0, c1 = d1, j = k;
}
}
if(l == 0) { break; }
sssort(T, PAb, SA + k, SA + l,
curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
}
}
#else
/* Serial variant: walk the (c0, c1) buckets from the top and sssort()
every range that holds more than one suffix. */
buf = SA + m, bufsize = n - (2 * m);
for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
i = BUCKET_BSTAR(c0, c1);
if(1 < (j - i)) {
sssort(T, PAb, SA + i, SA + j,
buf, bufsize, 2, n, *(SA + i) == (m - 1));
}
}
}
#endif
/* Compute ranks of type B* substrings. Negated entries (set below and by
sssort) mark runs of equal substrings sharing one rank. */
for(i = m - 1; 0 <= i; --i) {
if(0 <= SA[i]) {
j = i;
do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
SA[i + 1] = i - j;
if(i <= 0) { break; }
}
j = i;
do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
ISAb[SA[i]] = j;
}
/* Construct the inverse suffix array of type B* suffixes using trsort. */
trsort(ISAb, SA, m, 1);
/* Set the sorted order of type B* suffixes. */
for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
if(0 <= i) {
t = i;
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
}
}
/* Calculate the index of start/end point of each bucket. */
BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
i = BUCKET_A(c0 + 1) - 1;
for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
t = i - BUCKET_B(c0, c1);
BUCKET_B(c0, c1) = i; /* end point */
/* Move all type B* suffixes to the correct position. */
for(i = t, j = BUCKET_BSTAR(c0, c1);
j <= k;
--i, --k) { SA[i] = SA[k]; }
}
BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
BUCKET_B(c0, c0) = i; /* end point */
}
}
return m;
}
/* Constructs the suffix array by using the sorted order of type B* suffixes. */
/* Induced-sorting step: builds the full suffix array in SA from the sorted
 * order of the m type B* suffixes (right-to-left pass induces type B,
 * left-to-right pass induces type A). Negated entries mark already-placed
 * positions and are restored as they are scanned. */
static
void
construct_SA(const sauchar_t *T, saidx_t *SA,
saidx_t *bucket_A, saidx_t *bucket_B,
saidx_t n, saidx_t m) {
saidx_t *i, *j, *k;
saidx_t s;
saint_t c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
*j = ~s;  /* negate to mark this slot as consumed */
c0 = T[--s];
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
/* Entered a new bucket: persist the previous one's boundary. */
if(0 <= c2) { BUCKET_B(c2, c1) =(saidx_t)(k - SA); }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else {
assert(((s == 0) && (T[s] == c1)) || (s < 0));
*j = ~s;
}
}
}
}
/* Construct the suffix array by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
if(c0 != c2) {
BUCKET_A(c2) = (saidx_t)(k - SA);
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else {
assert(s < 0);
*i = ~s;  /* restore a negated (already induced) entry */
}
}
}
/* Constructs the burrows-wheeler transformed string directly
by using the sorted order of type B* suffixes. */
/* Variant of construct_SA() that emits the Burrows-Wheeler transformed
 * string directly: as suffixes are induced, each slot is overwritten with
 * the character preceding the suffix. Returns the primary index (position
 * of the original string's rotation in the BWT). */
static
saidx_t
construct_BWT(const sauchar_t *T, saidx_t *SA,
saidx_t *bucket_A, saidx_t *bucket_B,
saidx_t n, saidx_t m) {
saidx_t *i, *j, *k, *orig;
saidx_t s;
saint_t c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
c0 = T[--s];
*j = ~((saidx_t)c0);  /* store the BWT character, negated */
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
/* Entered a new bucket: persist the previous one's boundary. */
if(0 <= c2) { BUCKET_B(c2, c1) = (saidx_t)(k - SA); }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else if(s != 0) {
*j = ~s;
#ifndef NDEBUG
} else {
assert(T[s] == c1);
#endif
}
}
}
}
/* Construct the BWTed string by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
*i = c0;  /* emit the BWT character for this position */
if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); }
if(c0 != c2) {
BUCKET_A(c2) = (saidx_t)(k - SA);
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else if(s != 0) {
*i = ~s;
} else {
orig = i;  /* s == 0 marks the primary index */
}
}
return (saidx_t)(orig - SA);
}
/*---------------------------------------------------------------------------*/
/*- Function -*/
/* Computes the suffix array of T[0..n-1] into SA[0..n-1].
 * Returns 0 on success, -1 on invalid arguments, -2 on allocation failure. */
saint_t
divsufsort(const sauchar_t *T, saidx_t *SA, saidx_t n) {
  saidx_t *bucket_A;
  saidx_t *bucket_B;
  saidx_t m;
  saint_t err = 0;

  /* Invalid and trivial inputs are resolved without any allocation. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  if(n == 0) { return 0; }
  if(n == 1) { SA[0] = 0; return 0; }
  if(n == 2) {
    m = (T[0] < T[1]);
    SA[m ^ 1] = 0;
    SA[m] = 1;
    return 0;
  }

  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  if((bucket_A != NULL) && (bucket_B != NULL)) {
    /* Sort the B* suffixes, then induce the rest of the suffix array. */
    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n);
    construct_SA(T, SA, bucket_A, bucket_B, n, m);
  } else {
    err = -2;  /* out of memory */
  }

  free(bucket_B);
  free(bucket_A);
  return err;
}
/* Computes the Burrows-Wheeler transform of T[0..n-1] into U[0..n-1],
 * optionally using the caller-provided workspace A (of n+1 elements).
 * Returns primary index + 1 on success, n for trivial inputs (n <= 1),
 * -1 on invalid arguments, -2 on allocation failure. */
saidx_t
divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) {
  saidx_t *B;
  saidx_t *bucket_A;
  saidx_t *bucket_B;
  saidx_t m, pidx, i;

  /* Invalid and trivial inputs first. */
  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
  if(n <= 1) {
    if(n == 1) { U[0] = T[0]; }
    return n;
  }

  /* Use the caller's workspace when given, otherwise allocate our own. */
  B = A;
  if(B == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); }
  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
    m = sort_typeBstar(T, B, bucket_A, bucket_B, n);
    pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
    /* Copy to the output string, skipping over the primary index. */
    U[0] = T[n - 1];
    for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; }
    for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; }
    pidx += 1;
  } else {
    pidx = -2;  /* out of memory */
  }

  free(bucket_B);
  free(bucket_A);
  if(A == NULL) { free(B); }
  return pidx;
}
/* Reports the full version string of this libdivsufsort build. */
const char *divsufsort_version(void) { return PROJECT_VERSION_FULL; }
|
beam_vdif.c | /********************************************************
* *
* Licensed under the Academic Free License version 3.0 *
* *
********************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fftw3.h>
#include "vdifio.h"
#include "psrfits.h"
#include "star/pal.h"
#include "star/palmac.h"
#include "beam_common.h"
#include "beam_vdif.h"
#include "mwa_header.h"
#include "vdifio.h"
#include "ascii_header.h"
#include "filter.h"
#include "mycomplex.h"
#ifndef HAVE_CUDA
#include <omp.h>
#endif
/* Packs one second's worth of float samples into 8-bit offset-binary VDIF
 * frames (header + payload per frame) in a scratch buffer, then appends the
 * whole block to the output file via vdif_write_data().
 * `vhdr` is advanced frame-by-frame with nextVDIFHeader() as a side effect.
 * Aborts the program if the scratch buffer cannot be allocated. */
void vdif_write_second( struct vdifinfo *vf, vdif_header *vhdr,
                        float *data_buffer_vdif )
{
    float *data_buffer_ptr = data_buffer_vdif;
    size_t offset_out_vdif = 0;

    int8_t *out_buffer_8_vdif = (int8_t *)malloc(vf->block_size);
    if (out_buffer_8_vdif == NULL)
    {
        /* Previously unchecked: a failed malloc crashed in memcpy below. */
        fprintf( stderr, "error: vdif_write_second: failed to allocate "
                         "%lu bytes\n", (unsigned long)vf->block_size );
        exit(EXIT_FAILURE);
    }

    while (offset_out_vdif < vf->block_size) {
        // Add the current header
        memcpy( (out_buffer_8_vdif + offset_out_vdif), vhdr, VDIF_HEADER_SIZE );
        // Offset into the output array
        offset_out_vdif += VDIF_HEADER_SIZE;
        // Convert from float to int8
        float2int8_trunc( data_buffer_ptr, vf->sizeof_beam, -126.0, 127.0,
                          (out_buffer_8_vdif + offset_out_vdif) );
        to_offset_binary( (out_buffer_8_vdif + offset_out_vdif),
                          vf->sizeof_beam );
        offset_out_vdif += vf->frame_length - VDIF_HEADER_SIZE; // increment output offset
        data_buffer_ptr += vf->sizeof_beam;
        nextVDIFHeader( vhdr, vf->frame_rate );
    }

    // Write a full second's worth of samples
    vdif_write_data( vf, out_buffer_8_vdif );
    free( out_buffer_8_vdif );
}
/* Appends one block of packed VDIF frames to "<basefilename>.vdif" and
 * (re)writes the matching "<basefilename>.hdr" ASCII header for DSPSR.
 * Aborts the program if either file cannot be opened — previously the
 * fopen() results were unchecked and a NULL FILE* reached fwrite(). */
void vdif_write_data( struct vdifinfo *vf, int8_t *output )
{
    // form the filename
    // there is a standard naming convention
    char filename[1030];
    sprintf( filename, "%s.vdif", vf->basefilename );

    //fprintf(stderr,"Attempting to open VDIF file for writing: %s\n",filename);
    FILE *fs = fopen( filename, "a" );
    if (fs == NULL)
    {
        fprintf( stderr, "error: vdif_write_data: could not open '%s' "
                         "for writing\n", filename );
        exit(EXIT_FAILURE);
    }
    fwrite( output, vf->block_size, 1, fs );
    fclose( fs );

    // write a CPSR2 test header for DSPSR
    char ascii_header[MWA_HEADER_SIZE] = MWA_HEADER_INIT;
    //ascii_header_set( ascii_header, "UTC_START", "%s", vf->date_obs );
    ascii_header_set( ascii_header, "DATAFILE", "%s", filename );
    ascii_header_set( ascii_header, "INSTRUMENT", "%s", "VDIF" );
    ascii_header_set( ascii_header, "TELESCOPE", "%s", vf->telescope );
    ascii_header_set( ascii_header, "MODE", "%s", vf->obs_mode );
    ascii_header_set( ascii_header, "FREQ", "%f", vf->fctr );
    ascii_header_set( ascii_header, "BW", "%f", vf->BW );
    ascii_header_set( ascii_header, "RA", "%s", vf->ra_str );
    ascii_header_set( ascii_header, "DEC", "%s", vf->dec_str );
    ascii_header_set( ascii_header, "SOURCE", "%s", vf->source );

    sprintf( filename, "%s.hdr", vf->basefilename );
    fs = fopen( filename,"w" );
    if (fs == NULL)
    {
        fprintf( stderr, "error: vdif_write_data: could not open '%s' "
                         "for writing\n", filename );
        exit(EXIT_FAILURE);
    }
    fwrite( ascii_header, MWA_HEADER_SIZE, 1, fs );
    fclose( fs );
}
/* Fills one vdifinfo struct per pointing (frame geometry, data rates,
 * observation metadata, output filenames) and initializes the VDIF header
 * `vhdr` with the observation start time.
 * NOTE(review): vhdr is re-initialized on every loop iteration from
 * delay_vals->intmjd/->fracmjd (element 0), while RA/Dec use delay_vals[p];
 * confirm a single shared header is intended. */
void populate_vdif_header(
struct vdifinfo *vf,
vdif_header *vhdr,
char *metafits,
char *obsid,
char *time_utc,
int sample_rate,
long int frequency,
int nchan,
long int chan_width,
char *rec_channel,
struct delays *delay_vals,
int npointing )
{
for ( int p=0; p<npointing; p++ )
{
// First how big is a DataFrame
vf[p].bits = 8; // this is because it is all the downstream apps support (dspsr/diFX)
vf[p].iscomplex = 1; // (it is complex data)
vf[p].nchan = 2; // I am hardcoding this to 2 channels per thread - one per pol
vf[p].samples_per_frame = 128; // also hardcoding to 128 time-samples per frame
vf[p].sample_rate = sample_rate*128; // = 1280000 (also hardcoding this to the raw channel rate)
vf[p].BW = 1.28;
vf[p].frame_length = (vf[p].nchan * (vf[p].iscomplex+1) * vf[p].samples_per_frame) +
VDIF_HEADER_SIZE; // = 544
vf[p].threadid = 0;
sprintf( vf[p].stationid, "mw" );
vf[p].frame_rate = sample_rate; // = 10000
vf[p].block_size = vf[p].frame_length * vf[p].frame_rate; // = 5440000
// A single frame (128 samples). Remember vf.nchan is kludged to npol
vf[p].sizeof_beam = vf[p].samples_per_frame * vf[p].nchan * (vf[p].iscomplex+1); // = 512
// One full second (1.28 million 2 bit samples)
vf[p].sizeof_buffer = vf[p].frame_rate * vf[p].sizeof_beam; // = 5120000
createVDIFHeader( vhdr, vf[p].frame_length, vf[p].threadid, vf[p].bits, vf[p].nchan,
vf[p].iscomplex, vf[p].stationid);
// Now we have to add the time
uint64_t start_day = delay_vals->intmjd;
uint64_t start_sec = roundf( delay_vals->fracmjd * 86400.0 );
uint64_t mjdsec = (start_day * 86400) + start_sec; // Note the VDIFEpoch is strange - from the standard
setVDIFEpoch( vhdr, start_day );
setVDIFMJDSec( vhdr, mjdsec );
setVDIFFrameNumber( vhdr, 0 );
// Get the project ID directly from the metafits file
// NOTE(review): `status` is not checked after these CFITSIO calls, so a
// missing/unreadable metafits silently leaves exp_name unset -- confirm.
fitsfile *fptr = NULL;
int status = 0;
fits_open_file(&fptr, metafits, READONLY, &status);
fits_read_key(fptr, TSTRING, "PROJECT", vf[p].exp_name, NULL, &status);
fits_close_file(fptr, &status);
strncpy( vf[p].scan_name, obsid, 17 );
vf[p].b_scales = (float *)malloc( sizeof(float) * vf[p].nchan );
vf[p].b_offsets = (float *)malloc( sizeof(float) * vf[p].nchan );
vf[p].got_scales = 1;
// NOTE(review): strncpy does not NUL-terminate when the source fills the
// destination; sizes here are assumed sufficient -- confirm field widths.
strncpy( vf[p].telescope, "MWA", 24);
strncpy( vf[p].obs_mode, "PSR", 8);
// Determine the RA and Dec strings
double ra2000 = delay_vals[p].mean_ra * PAL__DR2D;
double dec2000 = delay_vals[p].mean_dec * PAL__DR2D;
dec2hms(vf[p].ra_str, ra2000/15.0, 0); // 0 = no '+' sign
dec2hms(vf[p].dec_str, dec2000, 1); // 1 = with '+' sign
strncpy( vf[p].date_obs, time_utc, 24);
vf[p].MJD_epoch = delay_vals->intmjd + delay_vals->fracmjd;
vf[p].fctr = (frequency + (nchan/2.0)*chan_width)/1.0e6; // (MHz)
strncpy( vf[p].source, "unset", 24 );
// The output file basename
int ch = atoi(rec_channel);
sprintf( vf[p].basefilename, "%s_%s_%s_%s_ch%03d",
vf[p].exp_name, vf[p].scan_name, vf[p].ra_str, vf[p].dec_str, ch);
}
}
/* Standard deviation of the real and imaginary components of `input`
 * (length `nsamples`), assuming zero mean and using the (n-1) sample
 * normalisation. The two deviations are returned packed as the real and
 * imaginary parts of a single ComplexFloat. */
ComplexFloat get_std_dev_complex(ComplexFloat *input, int nsamples)
{
    float sum_re_sq = 0;
    float sum_im_sq = 0;
    int s;

    /* Accumulate per-component sums of squares (mean assumed zero). */
    for (s = 0; s < nsamples; s++)
    {
        sum_re_sq = sum_re_sq + (CRealf(input[s]) * CRealf(input[s]));
        sum_im_sq = sum_im_sq + (CImagf(input[s]) * CImagf(input[s]));
    }

    return CMakef( sqrtf((1.0/(nsamples-1)) * sum_re_sq),
                   sqrtf((1.0/(nsamples-1)) * sum_im_sq) );
}
/* Counts how many of the `nsamples` input values would clip (either
 * component exceeding +/-127 after scaling by *new_gain) and warns on
 * stdout when any do. The gain itself is currently passed through
 * unchanged: the original automatic gain reduction is disabled until a
 * robust scheme exists. Aborts the program on NaN input. */
void set_level_occupancy(ComplexFloat *input, int nsamples, float *new_gain)
{
    float gain = *new_gain;
    int nclipped = 0;
    int s;

    for (s = 0; s < nsamples; s++)
    {
        /* NaNs indicate corrupt upstream data -- fail hard and loudly. */
        if (isnan(CRealf(input[s])) || isnan(CImagf(input[s])))
        {
            fprintf( stderr, "error: set_level_occupancy: input[%d] = NaN\n",
                     s );
            exit(EXIT_FAILURE);
        }
        if (fabs(gain*CRealf(input[s])) > 127 || fabs(gain*CImagf(input[s])) > 127 )
        {
            nclipped++;
        }
    }

    float percentage_clipped = ((float) nclipped/nsamples) * 100;
    if (nclipped > 0)
    {
        fprintf(stdout,"warning: percentage samples clipped %f percent\n",percentage_clipped);
    }

    *new_gain = gain;
}
/* Component-wise and complex mean of `input` over `nsamples` samples:
 * writes the real-part mean to *rmean, the imaginary-part mean to *imean,
 * and the full complex mean to *cmean. */
void get_mean_complex( ComplexFloat *input, int nsamples, float *rmean,
                       float *imean, ComplexFloat *cmean)
{
    float sum_re = 0;
    float sum_im = 0;
    ComplexFloat sum_c = CMakef( 0.0, 0.0 );
    int s;

    for (s = 0; s < nsamples; s++)
    {
        sum_re += CRealf( input[s] );
        sum_im += CImagf( input[s] );
        sum_c = CAddf( sum_c, input[s] );
    }

    *rmean = sum_re / nsamples;
    *imean = sum_im / nsamples;
    *cmean = CSclf( sum_c, 1.0 / (float)nsamples );
}
/* Scales every sample of `input` (length `nsamples`) by `scale`, in place. */
void normalise_complex(ComplexFloat *input, int nsamples, float scale)
{
    int s;
    for (s = 0; s < nsamples; s++)
        input[s] = CSclf( input[s], scale );
}
/* Converts `n` two's-complement int8 samples to offset binary in place by
 * flipping the sign bit of each sample (x -> x ^ 0x80). */
void to_offset_binary(int8_t *i, int n)
{
    int k;
    for (k = 0; k < n; k++)
        i[k] = i[k] ^ 0x80;
}
#ifndef HAVE_CUDA
void invert_pfb_ifft( ComplexDouble ***detected_beam, int file_no,
                      int nsamples, int nchan, int npol,
                      float *data_buffer_vdif )
/* "Invert the PFB" by simply applying an inverse FFT.
 * This function expects "detected_beam" to be structured as follows:
 *
 *   detected_beam[2*nsamples][nchan][npol]
 *
 * Although detected_samples potentially contains 2 seconds' worth of data,
 * this function only FFTs one second. The appropriate second is worked out
 * using file_no: if it is even, the first half of detected_beam is used,
 * if odd, the second half.
 *
 * The output of the inverse FFT is packed back into data_buffer_vdif, a 1D
 * array whose ordering is as follows:
 *
 *   time, pol, complexity
 *
 * This ordering is suited for immediate output to the VDIF format.
 */
{
    // Allocate FFTW arrays
    // One nchan-point spectrum per (sample, polarisation) pair.
    int arr_size = nsamples * nchan * npol;
    fftwf_complex *in = (fftwf_complex *)fftwf_malloc( arr_size * sizeof(fftwf_complex) );

    // Create a plan for doing column-wise 1D transforms
    // (batched in-place backward transforms via the FFTW "advanced" API:
    //  'howmany' contiguous blocks of length nchan, stride 1)
    int rank     = 1;
    int n[]      = { nchan };
    int howmany  = nsamples * npol;
    int idist    = nchan;
    int odist    = nchan;
    int istride  = 1;
    int ostride  = 1;
    int *inembed = n, *onembed = n;
    fftwf_plan p = fftwf_plan_many_dft( rank, n, howmany,
                        in, inembed, istride, idist,
                        in, onembed, ostride, odist,
                        FFTW_BACKWARD, FFTW_ESTIMATE );

    // Populate the FFTW arrays such that the middle channel of detected_beam
    // is placed nearest the DC term.
    int s;    // sample index

#pragma omp parallel for
    for (s = 0; s < nsamples; s ++)
    {
        int ds, ch, pol;
        int ii;  // "in" index
        int chi; // corrected channel index for "in" array

        // Calculate the proper sample index for this second
        // (even file_no -> first half of detected_beam, odd -> second half)
        ds = (file_no % 2)*nsamples + s;

        for (ch = 0; ch < nchan; ch++ )
            for (pol = 0; pol < npol; pol++)
            {
                // Swap the two halves of the array
                // (an "fftshift": original channel nchan/2 lands on bin 0)
                chi = (ch < nchan/2 ? ch + (nchan/2) : ch - (nchan/2));

                // Calculate the "in" index
                ii = nchan * npol * s +
                     nchan * pol +
                     chi;

                // Copy across the data (but set DC bin to 0)
                // NOTE(review): this assigns a ComplexDouble into an
                // fftwf_complex; presumably ComplexDouble is a C99 double
                // complex so the implicit narrowing conversion applies —
                // confirm against the ComplexDouble definition.
                in[ii] = (chi == 0 ? 0.0 : detected_beam[ds][ch][pol]);
            }
    }

    /*
    fprintf( stderr, "  First column to be iFFT'd (inside invert_pfb_ifft()): [\n" );
    for (s = 0; s < nchan; s++)
        fprintf( stderr, "    %f + %f*I\n", creal(in[s]), cimag(in[s]) );
    fprintf( stderr, "]\n" );
    */

    // Execute the FFT
    fftwf_execute( p );

    // Pack result into the output array
#pragma omp parallel for
    for (s = 0; s < nsamples; s ++)
    {
        int ch, pol;
        int ii, oi; // "in" index & "out" index

        for (ch = 0; ch < nchan; ch++ )
            for (pol = 0; pol < npol; pol++)
            {
                // Calculate the "in" index
                ii = nchan * npol * s +
                     nchan * pol +
                     ch;

                // Calculate the "out" index ("ch" here turns into a subdivision
                // of time)
                oi = 2 * npol * nchan * s +
                     2 * npol * ch +
                     2 * pol;

                // Copy data across, dividing by nchan to account for the lack of
                // normalisation in the FFTW library.
                data_buffer_vdif[oi]   = crealf(in[ii]) / (double)nchan;
                data_buffer_vdif[oi+1] = cimagf(in[ii]) / (double)nchan;
            }
    }

    // Clean up
    fftwf_free( in );
    fftwf_destroy_plan( p );
}
void invert_pfb_ord( ComplexDouble ***detected_beam, int file_no,
                     int nsamples, int nchan, int npol,
                     ComplexDouble **fils, int fil_size,
                     float *data_buffer_uvdif )
/* "Invert the PFB" by applying a resynthesis filter.
 * This function expects "detected_beam" to be structured as follows:
 *
 *   detected_beam[2*nsamples][nchan][npol]
 *
 * Although detected_samples potentially contains 2 seconds' worth of data,
 * this function only inverts 1 second. The appropriate second is worked out
 * using file_no: if it is even, the first half of detected_beam is used,
 * if odd, the second half.
 *
 * The output of the inversion is packed back into data_buffer_vdif, a 1D
 * array whose ordering is as follows:
 *
 *   time, pol, complexity
 *
 * This ordering is suited for immediate output to the VDIF format.
 *
 * Finally, fils points to a 2D array of filter coefficients, each row of
 * which has been "rotated" with phase ramps of different amounts. It is
 * assumed that fils has size:
 *
 *   fils[nchan][fil_size]
 */
{
    // Set the output buffer to zeros
    // (2 floats per complex value: npol * nchan * nsamples complex outputs)
    int s;
#pragma omp parallel for
    for (s = 0; s < npol*nchan*nsamples*2; s++)
    {
        data_buffer_uvdif[s] = 0.0;
    }

    // Loop over (output) sample -- embarassingly parallel
#pragma omp parallel for
    for (s = 0; s < nchan*nsamples; s++)
    {
        //fprintf( stderr, "  Thread num: %d, s = %d\n", omp_get_thread_num(), s );
        int U = nchan;        // upsampling factor = number of channels
        int i0;               // The index of the first input sample to
                              //   be included in the output sum
        int f0;               // The index of the first filter coeffi-
                              //   cient to be included in the output sum
        int N = nsamples * U; // The total number of output samples
        int ch, f, i, pol, oi; // Various loop counters
        ComplexDouble part;

        for (pol = 0; pol < npol; pol++)
        {
            // Calculate the output index for data_buffer_uvdif
            oi = 2*npol*s + 2*pol;

            // First take care of the corner case = the very first second
            // (no earlier input exists to feed the filter history, so the
            //  first fil_size-1 outputs are left at zero)
            if (file_no == 0 && s < fil_size - 1)
            {
                //data_buffer_uvdif[oi  ] = 0.0; // "real"
                //data_buffer_uvdif[oi+1] = 0.0; // "imag"
                continue;
            }

            // Calculate the first input idx to be included in this out sample
            // NOTE(review): the "+ 2*N" term keeps the numerator positive
            // before the modulo; the even/odd cases differ because for even
            // file_no the current second sits in the FIRST half of the
            // circular detected_beam buffer — verify against the caller's
            // buffer-filling convention.
            if (file_no % 2 == 0)
                i0 = ((s + 2*N - fil_size + U) / U) % (2*nsamples);
            else // file_no % 2 == 1
                i0 = (s + 1*N - fil_size + U) / U;

            // Calculate the first filter coefficient index
            // (which polyphase branch this output sample falls on)
            f0 = (U - (s % U) - 1) % U;

            // Loop over channels and filter coefficients to calculate output
            for (ch = 0; ch < nchan; ch++)
            //for (ch = 3; ch < 4; ch++)
            {
                i = i0;
                for (f = f0; f < fil_size; f += U)
                {
                    // Multiply-accumulate: time-reversed filter tap times
                    // the corresponding input sample for this channel/pol.
                    part = CMuld( fils[ch][(fil_size-1) - f], detected_beam[i][ch][pol] );
                    data_buffer_uvdif[oi  ] += CReald(part);
                    data_buffer_uvdif[oi+1] += CImagd(part);

                    // Update input index simultaneously with filter coeff
                    i++;
                    if (i == 2*nsamples)  i = 0; // (i.e. loop back around to
                                                 //  the other second)
                } // Loop over relevant filter coefficients
            } // Loop over channels

            // Normalise the result
            data_buffer_uvdif[oi  ] /= nchan;
            data_buffer_uvdif[oi+1] /= nchan;

        } // Loop over X/Y pol
    } // Loop over samples
}
#endif
// --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2013.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Andreas Bertsch $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#ifndef OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H
#define OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/FORMAT/Base64.h>
#include <OpenMS/FORMAT/OPTIONS/PeakFileOptions.h>
#include <OpenMS/FORMAT/HANDLERS/XMLHandler.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/INTERFACES/IMSDataConsumer.h>
#include <stack>
namespace OpenMS
{
class MetaInfoInterface;
namespace Internal
{
/**
@brief XML handlers for MzXMLFile
MapType has to be a MSExperiment or have the same interface.
Do not use this class. It is only needed in MzXMLFile.
*/
template <typename MapType>
class MzXMLHandler :
  public XMLHandler
{
public:
  /**@name Constructors and destructor */
  //@{
  /// Constructor for a read-only handler
  MzXMLHandler(MapType& exp, const String& filename, const String& version, ProgressLogger& logger) :
    XMLHandler(filename, version),
    exp_(&exp),
    cexp_(0),
    decoder_(),
    nesting_level_(0),
    skip_spectrum_(false),
    spec_write_counter_(1),
    consumer_(NULL),
    scan_count_(0),
    logger_(logger)
  {
    init_();
  }

  /// Constructor for a write-only handler
  MzXMLHandler(const MapType& exp, const String& filename, const String& version, const ProgressLogger& logger) :
    XMLHandler(filename, version),
    exp_(0),
    cexp_(&exp),
    decoder_(),
    nesting_level_(0),
    skip_spectrum_(false),
    spec_write_counter_(1),
    consumer_(NULL),
    scan_count_(0),
    logger_(logger)
  {
    init_();
  }

  /// Destructor
  virtual ~MzXMLHandler() {}
  //@}

  // Docu in base class
  virtual void endElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname);

  // Docu in base class
  virtual void startElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname, const xercesc::Attributes& attributes);

  // Docu in base class
  virtual void characters(const XMLCh* const chars, const XMLSize_t length);

  /// Write the contents to a stream
  void writeTo(std::ostream& os);

  /// Sets the options
  void setOptions(const PeakFileOptions& options)
  {
    options_ = options;
  }

  ///Gets the scan count
  UInt getScanCount()
  {
    return scan_count_;
  }

  /// Set the IMSDataConsumer consumer which will consume the read data
  void setMSDataConsumer(Interfaces::IMSDataConsumer<MapType> * consumer)
  {
    consumer_ = consumer;
  }

private:
  /// initialize members (call from C'tor)
  /// Fills cv_terms_ with the mzXML enum-to-string lookup tables used by
  /// cvStringToEnum_ during parsing. Index meanings: 0 = polarity,
  /// 1 = scan type (unused), 2 = ionization method, 3 = mass analyzer,
  /// 4 = detector, 5 = resolution method.
  void init_()
  {
    cv_terms_.resize(6);
    //Polarity
    String("any;+;-").split(';', cv_terms_[0]);
    //Scan type
    // is no longer used cv_terms_[1] is empty now
    //Ionization method
    String(";ESI;EI;CI;FAB;;;;;;;;;;;;;APCI;;;;;;;;MALDI").split(';', cv_terms_[2]);
    cv_terms_[2].resize(IonSource::SIZE_OF_IONIZATIONMETHOD);
    //Mass analyzer
    String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';', cv_terms_[3]);
    cv_terms_[3].resize(MassAnalyzer::SIZE_OF_ANALYZERTYPE);
    //Detector
    String(";EMT;;;Faraday Cup;;;;;Channeltron;Daly;Microchannel plate").split(';', cv_terms_[4]);
    cv_terms_[4].resize(IonDetector::SIZE_OF_TYPE);
    //Resolution method
    String(";FWHM;TenPercentValley;Baseline").split(';', cv_terms_[5]);
    cv_terms_[5].resize(MassAnalyzer::SIZE_OF_RESOLUTIONMETHOD);

    /* // OLD:
    cv_terms_.resize(6);
    //Polarity
    String("any;+;-").split(';',cv_terms_[0]);
    //Scan type
    // is no longer used cv_terms_[1] is empty now
    //Ionization method
    String(";ESI;EI;CI;FAB;TSP;MALDI;FD;FI;PD;SI;TI;API;ISI;CID;CAD;HN;APCI;APPI;ICP").split(';',cv_terms_[2]);
    //Mass analyzer
    String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';',cv_terms_[3]);
    //Detector
    String(";EMT;Daly;;Faraday Cup;;;;Channeltron").split(';',cv_terms_[4]);
    //Resolution method
    String(";FWHM;TenPercentValley;Baseline").split(';',cv_terms_[5]);
    */
  }

protected:
  /// Peak type
  typedef typename MapType::PeakType PeakType;
  /// Spectrum type
  typedef MSSpectrum<PeakType> SpectrumType;

  /// map pointer for reading
  MapType* exp_;
  /// map pointer for writing
  const MapType* cexp_;

  /// Options for loading and storing
  PeakFileOptions options_;

  /**@name temporary data structures to hold parsed data */
  //@{
  Base64 decoder_;
  Int nesting_level_;

  /**
    @brief Data necessary to generate a single spectrum

    Small struct holds all data necessary to populate a spectrum at a
    later timepoint (since reading of the base64 data and generation of
    spectra can be done at distinct timepoints).
  */
  struct SpectrumData
  {
    UInt peak_count_;          // peaksCount attribute: number of (m/z, intensity) pairs
    String precision_;         // "32" or "64" (bits per float in the base64 block)
    String compressionType_;   // "none" or "zlib"
    String char_rest_;         // raw base64 character data collected so far
    SpectrumType spectrum;
    bool skip_data;
  };

  /// Vector of spectrum data stored for later parallel processing
  std::vector< SpectrumData > spectrum_data_;
  //@}

  /// Flag that indicates whether this spectrum should be skipped (due to options)
  bool skip_spectrum_;

  /// spectrum counter (spectra without peaks are not written)
  UInt spec_write_counter_;

  /// Consumer class to work on spectra
  Interfaces::IMSDataConsumer<MapType>* consumer_;

  /// Consumer class to work on spectra
  UInt scan_count_;

  /// Progress logging class
  const ProgressLogger& logger_;

  /// write metaInfo to xml (usually in nameValue-tag)
  /// Emits one <tag name="..." value="..."/> line per meta key, skipping
  /// keys that begin with '#' (internal bookkeeping entries).
  inline void writeUserParam_(std::ostream& os, const MetaInfoInterface& meta, int indent = 4, String tag = "nameValue")
  {
    std::vector<String> keys;  // Vector to hold keys to meta info
    meta.getKeys(keys);

    for (std::vector<String>::const_iterator it = keys.begin(); it != keys.end(); ++it)
    {
      if ((*it)[0] != '#')  // internally used meta info start with '#'
      {
        os << String(indent, '\t') << "<" << tag << " name=\"" << *it << "\" value=\"" << meta.getMetaValue(*it) << "\"/>\n";
      }
    }
  }

  /// data processing auxiliary variable
  std::vector<DataProcessing> data_processing_;

  /**
    @brief Fill a single spectrum with data from input

    Decodes the spectrum's base64-encoded peak block (32- or 64-bit,
    optionally zlib-compressed, big-endian) and pushes the resulting
    (m/z, intensity) pairs that pass the configured m/z / intensity
    range filters into spectrum_data.spectrum.

    @note Do not modify any internal state variables of the class since
    this function will be executed in parallel.
  */
  void doPopulateSpectraWithData_(SpectrumData & spectrum_data)
  {
    typedef typename SpectrumType::PeakType PeakType;

    // Deliberately shadows the member decoder_: each invocation gets its
    // own decoder so parallel calls do not share state.
    Base64 decoder_;

    //std::cout << "reading scan" << "\n";
    if (spectrum_data.char_rest_ == "") // no peaks
    {
      return;
    }

    //remove whitespaces from binary data
    //this should not be necessary, but linebreaks inside the base64 data are unfortunately no exception
    spectrum_data.char_rest_.removeWhitespaces();

    if (spectrum_data.precision_ == "64")
    {
      std::vector<DoubleReal> data;
      if (spectrum_data.compressionType_ == "zlib")
      {
        decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true);
      }
      else
      {
        decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data);
      }
      spectrum_data.char_rest_ = "";
      PeakType peak;
      //push_back the peaks into the container
      // NOTE(review): assumes the decoded vector holds at least
      // 2 * peak_count_ values — a malformed file could make this read
      // out of bounds; confirm whether decode() guarantees the size.
      for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2)
      {
        // check if peak in in the specified m/z  and intensity range
        if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n])))
           && (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1]))))
        {
          peak.setMZ(data[n]);
          peak.setIntensity(data[n + 1]);
          spectrum_data.spectrum.push_back(peak);
        }
      }
    }
    else //precision 32
    {
      std::vector<Real> data;
      if (spectrum_data.compressionType_ == "zlib")
      {
        decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true);
      }
      else
      {
        decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data);
      }
      spectrum_data.char_rest_ = "";
      PeakType peak;
      //push_back the peaks into the container
      for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2)
      {
        if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n])))
           && (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1]))))
        {
          peak.setMZ(data[n]);
          peak.setIntensity(data[n + 1]);
          spectrum_data.spectrum.push_back(peak);
        }
      }
    }
  }

  /**
    @brief Populate all spectra on the stack with data from input

    Will populate all spectra on the current work stack with data (using
    multiple threads if available) and append them to the result.
  */
  void populateSpectraWithData_()
  {
    // Whether spectrum should be populated with data
    if (options_.getFillData())
    {
      size_t errCount = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for (SignedSize i = 0; i < (SignedSize)spectrum_data_.size(); i++)
      {
        // parallel exception catching and re-throwing business
        // (exceptions must not escape an OpenMP worker, so they are
        //  counted here and one ParseError is re-thrown afterwards)
        if (!errCount) // no need to parse further if already an error was encountered
        {
          try
          {
            doPopulateSpectraWithData_(spectrum_data_[i]);
          }
          catch (...)
          {
#pragma omp critical(HandleException)
            ++errCount;
          }
        }
      }
      if (errCount != 0)
      {
        throw Exception::ParseError(__FILE__, __LINE__, __PRETTY_FUNCTION__, file_, "Error during parsing of binary data.");
      }
    }

    // Append all spectra
    // Either hand each spectrum to the consumer (and optionally also store
    // it) or store it directly in the experiment.
    for (Size i = 0; i < spectrum_data_.size(); i++)
    {
      if (consumer_ != NULL)
      {
        consumer_->consumeSpectrum(spectrum_data_[i].spectrum);
        if (options_.getAlwaysAppendData())
        {
          exp_->addSpectrum(spectrum_data_[i].spectrum);
        }
      }
      else
      {
        exp_->addSpectrum(spectrum_data_[i].spectrum);
      }
    }

    // Delete batch
    spectrum_data_.clear();
  }

private:
  /// Not implemented
  MzXMLHandler();

  // Lazily-transcoded XMLCh constants for the mzXML attribute names,
  // shared by all template instantiations' parse callbacks.
  static const XMLCh* s_value_;
  static const XMLCh* s_count_;
  static const XMLCh* s_type_;
  static const XMLCh* s_name_;
  static const XMLCh* s_version_;
  static const XMLCh* s_filename_;
  static const XMLCh* s_filetype_;
  static const XMLCh* s_filesha1_;
  static const XMLCh* s_completiontime_;
  static const XMLCh* s_precision_;
  static const XMLCh* s_byteorder_;
  static const XMLCh* s_pairorder_;
  static const XMLCh* s_compressionType_;
  static const XMLCh* s_precursorintensity_;
  static const XMLCh* s_precursorcharge_;
  static const XMLCh* s_windowwideness_;
  static const XMLCh* s_mslevel_;
  static const XMLCh* s_peakscount_;
  static const XMLCh* s_polarity_;
  static const XMLCh* s_scantype_;
  static const XMLCh* s_retentiontime_;
  static const XMLCh* s_startmz_;
  static const XMLCh* s_endmz_;
  static const XMLCh* s_first_;
  static const XMLCh* s_last_;
  static const XMLCh* s_phone_;
  static const XMLCh* s_email_;
  static const XMLCh* s_uri_;
  static const XMLCh* s_num_;
  static const XMLCh* s_intensitycutoff_;
  static const XMLCh* s_centroided_;
  static const XMLCh* s_deisotoped_;
  static const XMLCh* s_chargedeconvoluted_;

  // init all the static members, which is necessary because otherwise the undefined order will cause problems
  // NOTE(review): the transcoded strings are never released; they appear
  // to live for the whole process — confirm this is intentional.
  void initStaticMembers_()
  {
    static bool init(false);
    if (!init)
    {
      s_value_ = xercesc::XMLString::transcode("value");
      s_count_ = xercesc::XMLString::transcode("scanCount");
      s_type_ = xercesc::XMLString::transcode("type");
      s_name_ = xercesc::XMLString::transcode("name");
      s_version_ = xercesc::XMLString::transcode("version");
      s_filename_ = xercesc::XMLString::transcode("fileName");
      s_filetype_ = xercesc::XMLString::transcode("fileType");
      s_filesha1_ = xercesc::XMLString::transcode("fileSha1");
      s_completiontime_ = xercesc::XMLString::transcode("completionTime");
      s_precision_ = xercesc::XMLString::transcode("precision");
      s_byteorder_ = xercesc::XMLString::transcode("byteOrder");
      s_pairorder_ = xercesc::XMLString::transcode("pairOrder");
      s_compressionType_ = xercesc::XMLString::transcode("compressionType");
      s_precursorintensity_ = xercesc::XMLString::transcode("precursorIntensity");
      s_precursorcharge_ = xercesc::XMLString::transcode("precursorCharge");
      s_windowwideness_ = xercesc::XMLString::transcode("windowWideness");
      s_mslevel_ = xercesc::XMLString::transcode("msLevel");
      s_peakscount_ = xercesc::XMLString::transcode("peaksCount");
      s_polarity_ = xercesc::XMLString::transcode("polarity");
      s_scantype_ = xercesc::XMLString::transcode("scanType");
      s_retentiontime_ = xercesc::XMLString::transcode("retentionTime");
      s_startmz_ = xercesc::XMLString::transcode("startMz");
      s_endmz_ = xercesc::XMLString::transcode("endMz");
      s_first_ = xercesc::XMLString::transcode("first");
      s_last_ = xercesc::XMLString::transcode("last");
      s_phone_ = xercesc::XMLString::transcode("phone");
      s_email_ = xercesc::XMLString::transcode("email");
      s_uri_ = xercesc::XMLString::transcode("URI");
      s_num_ = xercesc::XMLString::transcode("num");
      s_intensitycutoff_ = xercesc::XMLString::transcode("intensityCutoff");
      s_centroided_ = xercesc::XMLString::transcode("centroided");
      s_deisotoped_ = xercesc::XMLString::transcode("deisotoped");
      s_chargedeconvoluted_ = xercesc::XMLString::transcode("chargeDeconvoluted");
      init = true;
    }
    return;
  }

};
//--------------------------------------------------------------------------------
// this cannot be moved into a function as VS2008 does not allow more than 31 static members in a function .. don't ask...
// Out-of-class definitions of the template's static XMLCh* members.
// All start as null pointers; initStaticMembers_() fills them in on first use.
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_value_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_count_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_type_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_name_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_version_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filename_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filetype_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filesha1_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_completiontime_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_precision_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_byteorder_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_pairorder_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_compressionType_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_precursorintensity_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_precursorcharge_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_windowwideness_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_mslevel_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_peakscount_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_polarity_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_scantype_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_retentiontime_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_startmz_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_endmz_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_first_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_last_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_phone_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_email_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_uri_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_num_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_intensitycutoff_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_centroided_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_deisotoped_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_chargedeconvoluted_ = 0;
template <typename MapType>
void MzXMLHandler<MapType>::startElement(const XMLCh* const /*uri*/,
const XMLCh* const /*local_name*/, const XMLCh* const qname,
const xercesc::Attributes& attributes)
{
OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more")
static bool init_static_members(false);
if (!init_static_members)
{
initStaticMembers_();
}
String tag = sm_.convert(qname);
open_tags_.push_back(tag);
//std::cout << " -- Start -- "<< tag << " -- " << "\n";
//Skip all tags until the the next scan
if (skip_spectrum_ && tag != "scan")
return;
if (tag == "msRun")
{
Int count = 0;
optionalAttributeAsInt_(count, attributes, s_count_);
exp_->reserve(count);
logger_.startProgress(0, count, "loading mzXML file");
scan_count_ = 0;
data_processing_.clear();
//start and end time are xs:duration. This makes no sense => ignore them
}
else if (tag == "parentFile")
{
SourceFile sf;
sf.setNameOfFile(attributeAsString_(attributes, s_filename_));
sf.setFileType(attributeAsString_(attributes, s_filetype_));
sf.setChecksum(attributeAsString_(attributes, s_filesha1_), SourceFile::SHA1);
exp_->getSourceFiles().push_back(sf);
}
else if (tag == "software")
{
String& parent_tag = *(open_tags_.end() - 2);
if (parent_tag == "dataProcessing")
{
data_processing_.back().getSoftware().setVersion(attributeAsString_(attributes, s_version_));
data_processing_.back().getSoftware().setName(attributeAsString_(attributes, s_name_));
data_processing_.back().setMetaValue("#type", String(attributeAsString_(attributes, s_type_)));
String time;
optionalAttributeAsString_(time, attributes, s_completiontime_);
data_processing_.back().setCompletionTime(asDateTime_(time));
}
else if (parent_tag == "msInstrument")
{
exp_->getInstrument().getSoftware().setVersion(attributeAsString_(attributes, s_version_));
exp_->getInstrument().getSoftware().setName(attributeAsString_(attributes, s_name_));
}
}
else if (tag == "peaks")
{
//precision
spectrum_data_.back().precision_ = "32";
optionalAttributeAsString_(spectrum_data_.back().precision_, attributes, s_precision_);
if (spectrum_data_.back().precision_ != "32" && spectrum_data_.back().precision_ != "64")
{
error(LOAD, String("Invalid precision '") + spectrum_data_.back().precision_ + "' in element 'peaks'");
}
//byte order
String byte_order = "network";
optionalAttributeAsString_(byte_order, attributes, s_byteorder_);
if (byte_order != "network")
{
error(LOAD, String("Invalid or missing byte order '") + byte_order + "' in element 'peaks'. Must be 'network'!");
}
//pair order
String pair_order = "m/z-int";
optionalAttributeAsString_(pair_order, attributes, s_pairorder_);
if (pair_order != "m/z-int")
{
error(LOAD, String("Invalid or missing pair order '") + pair_order + "' in element 'peaks'. Must be 'm/z-int'!");
}
//compressionType
spectrum_data_.back().compressionType_ = "none";
optionalAttributeAsString_(spectrum_data_.back().compressionType_, attributes, s_compressionType_);
if (spectrum_data_.back().compressionType_ != "none" && spectrum_data_.back().compressionType_ != "zlib")
{
error(LOAD, String("Invalid compression type ") + spectrum_data_.back().compressionType_ + "in elements 'peaks'. Must be 'none' or 'zlib'! ");
}
}
else if (tag == "precursorMz")
{
//add new precursor
spectrum_data_.back().spectrum.getPrecursors().push_back(Precursor());
//intensity
try
{
spectrum_data_.back().spectrum.getPrecursors().back().setIntensity(attributeAsDouble_(attributes, s_precursorintensity_));
}
catch (Exception::ParseError& /*e*/)
{
error(LOAD, "Mandatory attribute 'precursorIntensity' of tag 'precursorMz' not found! Setting precursor intensity to zero!");
}
//charge
Int charge = 0;
if (optionalAttributeAsInt_(charge, attributes, s_precursorcharge_))
{
spectrum_data_.back().spectrum.getPrecursors().back().setCharge(charge);
}
//window bounds (here only the width is stored in both fields - this is corrected when we parse the m/z position)
DoubleReal window = 0.0;
if (optionalAttributeAsDouble_(window, attributes, s_windowwideness_))
{
spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(window);
}
}
else if (tag == "scan")
{
skip_spectrum_ = false;
nesting_level_++;
if (options_.getMetadataOnly())
throw EndParsingSoftly(__FILE__, __LINE__, __PRETTY_FUNCTION__);
// check if the scan is in the desired MS / RT range
UInt ms_level = attributeAsInt_(attributes, s_mslevel_);
if (ms_level == 0)
{
warning(LOAD, String("Invalid 'msLevel' attribute with value '0' in 'scan' element found. Assuming ms level 1!"));
ms_level = 1;
}
//parse retention time and convert it from xs:duration to seconds
DoubleReal retention_time = 0.0;
String time_string = "";
if (optionalAttributeAsString_(time_string, attributes, s_retentiontime_))
{
time_string = time_string.suffix('T');
//std::cout << "Initial trim: " << time_string << "\n";
if (time_string.has('H'))
{
retention_time += 3600 * asDouble_(time_string.prefix('H'));
time_string = time_string.suffix('H');
//std::cout << "After H: " << time_string << "\n";
}
if (time_string.has('M'))
{
retention_time += 60 * asDouble_(time_string.prefix('M'));
time_string = time_string.suffix('M');
//std::cout << "After M: " << time_string << "\n";
}
if (time_string.has('S'))
{
retention_time += asDouble_(time_string.prefix('S'));
time_string = time_string.suffix('S');
//std::cout << "After S: " << time_string << "\n";
}
}
logger_.setProgress(scan_count_);
if ((options_.hasRTRange() && !options_.getRTRange().encloses(DPosition<1>(retention_time)))
|| (options_.hasMSLevels() && !options_.containsMSLevel(ms_level))
|| options_.getSizeOnly())
{
// skip this tag
skip_spectrum_ = true;
++scan_count_;
return;
}
// Add a new spectrum, initialize and set MS level and RT
spectrum_data_.resize(spectrum_data_.size() + 1); // TODO !!
spectrum_data_.back().peak_count_ = 0;
spectrum_data_.back().spectrum.setMSLevel(ms_level);
spectrum_data_.back().spectrum.setRT(retention_time);
spectrum_data_.back().spectrum.setNativeID(String("scan=") + attributeAsString_(attributes, s_num_));
//peak count == twice the scan size
spectrum_data_.back().peak_count_ = attributeAsInt_(attributes, s_peakscount_);
spectrum_data_.back().spectrum.reserve(spectrum_data_.back().peak_count_ / 2 + 1);
spectrum_data_.back().spectrum.setDataProcessing(data_processing_);
//centroided, chargeDeconvoluted, deisotoped, collisionEnergy are ignored
//other optional attributes
ScanWindow window;
optionalAttributeAsDouble_(window.begin, attributes, s_startmz_);
optionalAttributeAsDouble_(window.end, attributes, s_endmz_);
if (window.begin != 0.0 || window.end != 0.0)
{
spectrum_data_.back().spectrum.getInstrumentSettings().getScanWindows().push_back(window);
}
String polarity = "any";
optionalAttributeAsString_(polarity, attributes, s_polarity_);
spectrum_data_.back().spectrum.getInstrumentSettings().setPolarity((IonSource::Polarity) cvStringToEnum_(0, polarity, "polarity"));
String type = "";
optionalAttributeAsString_(type, attributes, s_scantype_);
if (type == "")
{
//unknown/unset => do nothing here => no warning in the end
}
else if (type == "zoom")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true);
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "Full")
{
if (ms_level > 1)
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MSNSPECTRUM);
else
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "SIM")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SIM);
}
else if (type == "SRM" || type == "MRM")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SRM);
}
else if (type == "CRM")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::CRM);
}
else if (type == "Q1")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "Q3")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "EMS") //Non-standard type: Enhanced MS (ABI - Sashimi converter)
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "EPI") //Non-standard type: Enhanced Product Ion (ABI - Sashimi converter)
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
spectrum_data_.back().spectrum.setMSLevel(2);
}
else if (type == "ER") // Non-standard type: Enhanced Resolution (ABI - Sashimi converter)
{
spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true);
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
warning(LOAD, String("Unknown scan mode '") + type + "'. Assuming full scan");
}
++scan_count_;
}
else if (tag == "operator")
{
exp_->getContacts().resize(1);
exp_->getContacts().back().setFirstName(attributeAsString_(attributes, s_first_));
exp_->getContacts().back().setLastName(attributeAsString_(attributes, s_last_));
String tmp = "";
optionalAttributeAsString_(tmp, attributes, s_email_);
exp_->getContacts().back().setEmail(tmp);
tmp = "";
optionalAttributeAsString_(tmp, attributes, s_phone_);
if (tmp != "")
{
exp_->getContacts().back().setMetaValue("#phone", tmp);
}
tmp = "";
optionalAttributeAsString_(tmp, attributes, s_uri_);
exp_->getContacts().back().setURL(tmp);
}
else if (tag == "msManufacturer")
{
exp_->getInstrument().setVendor(attributeAsString_(attributes, s_value_));
}
else if (tag == "msModel")
{
exp_->getInstrument().setModel(attributeAsString_(attributes, s_value_));
}
else if (tag == "msIonisation")
{
exp_->getInstrument().getIonSources().resize(1);
exp_->getInstrument().getIonSources()[0].setIonizationMethod((IonSource::IonizationMethod) cvStringToEnum_(2, attributeAsString_(attributes, s_value_), "msIonization"));
}
else if (tag == "msMassAnalyzer")
{
exp_->getInstrument().getMassAnalyzers().resize(1);
exp_->getInstrument().getMassAnalyzers()[0].setType((MassAnalyzer::AnalyzerType) cvStringToEnum_(3, attributeAsString_(attributes, s_value_), "msMassAnalyzer"));
}
else if (tag == "msDetector")
{
exp_->getInstrument().getIonDetectors().resize(1);
exp_->getInstrument().getIonDetectors()[0].setType((IonDetector::Type) cvStringToEnum_(4, attributeAsString_(attributes, s_value_), "msDetector"));
}
else if (tag == "msResolution")
{
exp_->getInstrument().getMassAnalyzers()[0].setResolutionMethod((MassAnalyzer::ResolutionMethod) cvStringToEnum_(5, attributeAsString_(attributes, s_value_), "msResolution"));
}
else if (tag == "dataProcessing")
{
data_processing_.push_back(DataProcessing());
String boolean = "";
optionalAttributeAsString_(boolean, attributes, s_deisotoped_);
if (boolean == "true" || boolean == "1")
{
data_processing_.back().getProcessingActions().insert(DataProcessing::DEISOTOPING);
}
boolean = "";
optionalAttributeAsString_(boolean, attributes, s_chargedeconvoluted_);
if (boolean == "true" || boolean == "1")
{
data_processing_.back().getProcessingActions().insert(DataProcessing::CHARGE_DECONVOLUTION);
}
DoubleReal cutoff = 0.0;
optionalAttributeAsDouble_(cutoff, attributes, s_intensitycutoff_);
if (cutoff != 0.0)
{
data_processing_.back().setMetaValue("#intensity_cutoff", cutoff);
}
boolean = "";
optionalAttributeAsString_(boolean, attributes, s_centroided_);
if (boolean == "true" || boolean == "1")
{
data_processing_.back().getProcessingActions().insert(DataProcessing::PEAK_PICKING);
}
}
else if (tag == "nameValue")
{
String name = "";
optionalAttributeAsString_(name, attributes, s_name_);
if (name == "")
return;
String value = "";
optionalAttributeAsString_(value, attributes, s_value_);
String& parent_tag = *(open_tags_.end() - 2);
if (parent_tag == "msInstrument")
{
exp_->getInstrument().setMetaValue(name, value);
}
else if (parent_tag == "scan")
{
spectrum_data_.back().spectrum.setMetaValue(name, value);
}
else
{
std::cout << " Warning: Unexpected tag 'nameValue' in tag '" << parent_tag << "'" << "\n";
}
}
else if (tag == "processingOperation")
{
String name = "";
optionalAttributeAsString_(name, attributes, s_name_);
if (name == "")
return;
String value = "";
optionalAttributeAsString_(value, attributes, s_value_);
data_processing_.back().setMetaValue(name, value);
}
//std::cout << " -- !Start -- " << "\n";
}
template <typename MapType>
void MzXMLHandler<MapType>::endElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname)
{
  OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more")
  // Transcode the two tag names we react to only once per process.
  static const XMLCh* tag_mzxml = xercesc::XMLString::transcode("mzXML");
  static const XMLCh* tag_scan = xercesc::XMLString::transcode("scan");
  open_tags_.pop_back();
  if (equal_(qname, tag_mzxml))
  {
    // Document end: flush any spectra still pooled, then finish progress logging.
    populateSpectraWithData_();
    logger_.endProgress();
  }
  else if (equal_(qname, tag_scan))
  {
    // A (possibly nested) scan closed: drop one nesting level and, once back
    // at the top level, flush the pool if it has reached the configured limit.
    nesting_level_--;
    OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more")
    const bool pool_full = spectrum_data_.size() >= options_.getMaxDataPoolSize();
    if (nesting_level_ == 0 && pool_full)
    {
      populateSpectraWithData_();
    }
  }
  sm_.clear();
}
// SAX callback for XML character data (text between tags).  Only the content
// of <peaks>, <precursorMz> and <comment> carries information here; the
// index/checksum tags are skipped silently and any other non-whitespace text
// produces a warning.
template <typename MapType>
void MzXMLHandler<MapType>::characters(const XMLCh* const chars, const XMLSize_t length)
{
//Abort if this spectrum should be skipped
if (skip_spectrum_)
return;
if (open_tags_.back() == "peaks")
{
//chars may be split to several chunks => concatenate them
if (options_.getFillData())
{
// Since we convert a Base64 string here, it can only contain plain ASCII
sm_.appendASCII(chars, length, spectrum_data_.back().char_rest_);
}
}
else if (open_tags_.back() == "offset" || open_tags_.back() == "indexOffset" || open_tags_.back() == "sha1")
{
// index and checksum information is intentionally ignored
}
else if (open_tags_.back() == "precursorMz")
{
char* transcoded_chars = sm_.convert(chars);
DoubleReal mz_pos = asDouble_(transcoded_chars);
//precursor m/z
spectrum_data_.back().spectrum.getPrecursors().back().setMZ(mz_pos);
//update window bounds - center them around the m/z pos
// (presumably the full window width was stored in the lower offset while the
// attributes were parsed - confirm in startElement; split it here into
// symmetric half-widths around the precursor m/z)
DoubleReal window_width = spectrum_data_.back().spectrum.getPrecursors().back().getIsolationWindowLowerOffset();
if (window_width != 0.0)
{
spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(0.5 * window_width);
spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowUpperOffset(0.5 * window_width);
}
}
else if (open_tags_.back() == "comment")
{
char* transcoded_chars = sm_.convert(chars);
String parent_tag = *(open_tags_.end() - 2);
//std::cout << "- Comment of parent " << parent_tag << "\n";
// a comment's meaning depends on the enclosing element
if (parent_tag == "msInstrument")
{
exp_->getInstrument().setMetaValue("#comment", String(transcoded_chars));
}
else if (parent_tag == "dataProcessing")
{
//this is currently ignored
}
else if (parent_tag == "scan")
{
spectrum_data_.back().spectrum.setComment(transcoded_chars);
}
else if (String(transcoded_chars).trim() != "")
{
warning(LOAD, String("Unhandled comment '") + transcoded_chars + "' in element '" + open_tags_.back() + "'");
}
}
else
{
// unexpected text content in any other element => warn (unless whitespace)
char* transcoded_chars = sm_.convert(chars);
if (String(transcoded_chars).trim() != "")
{
warning(LOAD, String("Unhandled character content '") + transcoded_chars + "' in element '" + open_tags_.back() + "'");
}
}
}
// Serializes the experiment pointed to by cexp_ as an mzXML 2.1 document to
// the given stream.  mzXML can represent only one scan window per scan, one
// contact person and one data processing description (taken from the first
// spectrum), so surplus information is dropped (with a warning where a loss
// of information occurs).
template <typename MapType>
void MzXMLHandler<MapType>::writeTo(std::ostream& os)
{
//determine how many spectra there are (count only those with peaks)
UInt count_tmp_ = 0;
for (Size s = 0; s < cexp_->size(); s++)
{
const SpectrumType& spec = (*cexp_)[s];
if (spec.size() != 0)
++count_tmp_;
}
// NOTE(review): scanCount is forced to at least 1 - presumably to keep the
// output schema-valid for empty experiments; confirm against the mzXML schema.
if (count_tmp_ == 0)
++count_tmp_;
logger_.startProgress(0, cexp_->size(), "storing mzXML file");
os << "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n"
<< "<mzXML xmlns=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1\" "
<< "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
<< "xsi:schemaLocation=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1 "
<< "http://sashimi.sourceforge.net/schema_revision/mzXML_2.1/mzXML_idx_2.1.xsd\">\n"
<< "\t<msRun scanCount=\"" << count_tmp_ << "\">\n";
//----------------------------------------------------------------------------------------
// parent files
//----------------------------------------------------------------------------------------
if (cexp_->getSourceFiles().empty())
{
os << "\t\t<parentFile fileName=\"\" fileType=\"processedData\" fileSha1=\"0000000000000000000000000000000000000000\"/>\n";
}
else
{
for (Size i = 0; i < cexp_->getSourceFiles().size(); ++i)
{
const SourceFile& sf = cexp_->getSourceFiles()[i];
os << "\t\t<parentFile fileName=\"" << sf.getNameOfFile() << "\" fileType=\"";
//file type is an enum in mzXML => search for 'raw' string
String tmp_string = sf.getFileType();
tmp_string.toLower();
if (tmp_string.hasSubstring("raw"))
{
os << "RAWData";
}
else
{
os << "processedData";
}
//Sha1 checksum must have 40 characters => create a fake if it is unknown
os << "\" fileSha1=\"";
tmp_string = sf.getChecksum();
if (sf.getChecksum().size() != 40 || sf.getChecksumType() != SourceFile::SHA1)
{
os << "0000000000000000000000000000000000000000";
}
else
{
os << sf.getChecksum();
}
os << "\"/>\n";
}
}
//----------------------------------------------------------------------------------------
//instrument
//----------------------------------------------------------------------------------------
if (cexp_->getInstrument() != Instrument() || cexp_->getContacts().size() != 0)
{
const Instrument& inst = cexp_->getInstrument();
os << "\t\t<msInstrument>\n"
<< "\t\t\t<msManufacturer category=\"msManufacturer\" value=\"" << inst.getVendor() << "\"/>\n" << "\t\t\t<msModel category=\"msModel\" value=\"" << inst.getModel() << "\"/>\n";
// enum-valued instrument properties are mapped back to their CV string
// via the cv_terms_ tables; unset values are written as empty strings
if (inst.getIonSources().empty() || !inst.getIonSources()[0].getIonizationMethod())
{
os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"" << cv_terms_[2][inst.getIonSources()[0].getIonizationMethod()] << "\"/>\n";
}
const std::vector<MassAnalyzer>& analyzers = inst.getMassAnalyzers();
if (analyzers.empty() || !analyzers[0].getResolutionMethod())
{
os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"" << cv_terms_[3][analyzers[0].getType()] << "\"/>\n";
}
if (inst.getIonDetectors().empty() || !inst.getIonDetectors()[0].getType())
{
os << "\t\t\t<msDetector category=\"msDetector\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msDetector category=\"msDetector\" value=\"" << cv_terms_[4][inst.getIonDetectors()[0].getType()] << "\"/>\n";
}
os << "\t\t\t<software type=\"acquisition\" name=\"" << inst.getSoftware().getName() << "\" version=\"" << inst.getSoftware().getVersion() << "\"/>\n";
if (analyzers.empty() || !analyzers[0].getResolutionMethod())
{
os << "\t\t\t<msResolution category=\"msResolution\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msResolution category=\"msResolution\" value=\"" << cv_terms_[5][analyzers[0].getResolutionMethod()] << "\"/>\n";
}
// mzXML supports a single operator => only the first contact is written
if (cexp_->getContacts().size() > 0)
{
const ContactPerson& cont = cexp_->getContacts()[0];
os << "\t\t\t<operator first=\"" << cont.getFirstName() << "\" last=\"" << cont.getLastName() << "\"";
if (cont.getEmail() != "")
{
os << " email=\"" << cont.getEmail() << "\"";
}
if (cont.getURL() != "")
{
os << " URI=\"" << cont.getURL() << "\"";
}
if (cont.metaValueExists("#phone"))
{
os << " phone=\"" << (String)(cont.getMetaValue("#phone")) << "\"";
}
os << "/>\n";
}
writeUserParam_(os, inst, 3);
if (inst.metaValueExists("#comment"))
{
os << "\t\t\t<comment>" << inst.getMetaValue("#comment") << "</comment>\n";
}
os << "\t\t</msInstrument>\n";
}
//----------------------------------------------------------------------------------------
//data processing (the information of the first spectrum is assigned to the whole file)
//----------------------------------------------------------------------------------------
if (cexp_->size() == 0 || (*cexp_)[0].getDataProcessing().empty())
{
os << "\t\t<dataProcessing>\n"
<< "\t\t\t<software type=\"processing\" name=\"\" version=\"\"/>\n"
<< "\t\t</dataProcessing>\n";
}
else
{
for (Size i = 0; i < (*cexp_)[0].getDataProcessing().size(); ++i)
{
const DataProcessing& data_processing = (*cexp_)[0].getDataProcessing()[i];
os << "\t\t<dataProcessing deisotoped=\""
<< data_processing.getProcessingActions().count(DataProcessing::DEISOTOPING)
<< "\" chargeDeconvoluted=\""
<< data_processing.getProcessingActions().count(DataProcessing::CHARGE_DECONVOLUTION)
<< "\" centroided=\""
<< data_processing.getProcessingActions().count(DataProcessing::PEAK_PICKING)
<< "\"";
if (data_processing.metaValueExists("#intensity_cutoff"))
{
os << " intensityCutoff=\"" << data_processing.getMetaValue("#intensity_cutoff").toString() << "\"";
}
os << ">\n"
<< "\t\t\t<software type=\"";
if (data_processing.metaValueExists("#type"))
{
os << data_processing.getMetaValue("#type").toString();
}
else
{
os << "processing";
}
os << "\" name=\"" << data_processing.getSoftware().getName()
<< "\" version=\"" << data_processing.getSoftware().getVersion();
if (data_processing.getCompletionTime() != DateTime())
{
// xs:dateTime requires 'T' between date and time
os << "\" completionTime=\"" << data_processing.getCompletionTime().get().substitute(' ', 'T');
}
os << "\"/>\n";
writeUserParam_(os, data_processing, 3, "processingOperation");
os << "\t\t</dataProcessing>\n";
}
}
//check if the nativeID of all spectra are numbers or numbers prefixed with 'scan='
//If not we need to renumber all spectra.
bool all_numbers = true;
bool all_empty = true;
bool all_prefixed_numbers = true;
for (Size s = 0; s < cexp_->size(); s++)
{
String native_id = (*cexp_)[s].getNativeID();
if (!native_id.hasPrefix("scan="))
{
all_prefixed_numbers = false;
}
else
{
native_id = native_id.substr(5);
}
try
{
native_id.toInt();
}
catch (Exception::ConversionError&)
{
all_numbers = false;
all_prefixed_numbers = false;
if (native_id != "")
{
all_empty = false;
}
}
}
//If we need to renumber and the nativeIDs were not empty, warn the user
if (!all_numbers && !all_empty)
{
warning(STORE, "Not all spectrum native IDs are numbers or correctly prefixed with 'scan='. The spectra are renumbered and the native IDs are lost!");
}
// write scans
std::stack<UInt> open_scans;
for (Size s = 0; s < cexp_->size(); s++)
{
logger_.setProgress(s);
const SpectrumType& spec = (*cexp_)[s];
UInt ms_level = spec.getMSLevel();
open_scans.push(ms_level);
// scan number: native ID where usable, otherwise sequential renumbering
Size spectrum_id = s + 1;
if (all_prefixed_numbers)
{
spectrum_id = spec.getNativeID().substr(5).toInt();
}
else if (all_numbers)
{
spectrum_id = spec.getNativeID().toInt();
}
os << String(ms_level + 1, '\t')
<< "<scan num=\"" << spectrum_id << "\" msLevel=\""
<< ms_level << "\" peaksCount=\""
<< spec.size() << "\" polarity=\"";
if (spec.getInstrumentSettings().getPolarity() == IonSource::POSITIVE)
{
os << "+";
}
else if (spec.getInstrumentSettings().getPolarity() == IonSource::NEGATIVE)
{
os << "-";
}
else
{
os << "any";
}
//scan type
switch (spec.getInstrumentSettings().getScanMode())
{
case InstrumentSettings::UNKNOWN:
break;
case InstrumentSettings::MASSSPECTRUM:
case InstrumentSettings::MS1SPECTRUM:
case InstrumentSettings::MSNSPECTRUM:
if (spec.getInstrumentSettings().getZoomScan())
{
os << "\" scanType=\"zoom";
}
else
{
os << "\" scanType=\"Full";
}
break;
case InstrumentSettings::SIM:
os << "\" scanType=\"SIM";
break;
case InstrumentSettings::SRM:
os << "\" scanType=\"SRM";
break;
case InstrumentSettings::CRM:
os << "\" scanType=\"CRM";
break;
default:
os << "\" scanType=\"Full";
warning(STORE, String("Scan type '") + InstrumentSettings::NamesOfScanMode[spec.getInstrumentSettings().getScanMode()] + "' not supported by mzXML. Using 'Full' scan mode!");
}
// retention time in ISO 8601 duration form, e.g. "PT42.5S"; the sign is
// emitted separately because fabs() strips it from the value
os << "\" retentionTime=\"";
if (spec.getRT() < 0)
os << "-";
os << "PT" << std::fabs(spec.getRT()) << "S\"";
if (!spec.getInstrumentSettings().getScanWindows().empty())
{
os << " startMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].begin << "\" endMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].end << "\"";
}
if (spec.getInstrumentSettings().getScanWindows().size() > 1)
{
warning(STORE, "The MzXML format can store only one scan window for each scan. Only the first one is stored!");
}
os << ">\n";
for (Size i = 0; i < spec.getPrecursors().size(); ++i)
{
const Precursor& precursor = spec.getPrecursors()[i];
//intensity
os << String(ms_level + 2, '\t') << "<precursorMz precursorIntensity=\"" << precursor.getIntensity();
//charge
if (precursor.getCharge() != 0)
os << "\" precursorCharge=\"" << precursor.getCharge();
//window size
if (precursor.getIsolationWindowLowerOffset() + precursor.getIsolationWindowUpperOffset() > 0.0)
os << "\" windowWideness=\"" << (precursor.getIsolationWindowUpperOffset() + precursor.getIsolationWindowLowerOffset());
//m/z
os << "\">" << precursor.getMZ() << "</precursorMz>\n";
}
if (!spec.empty())
{
os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\">";
//std::cout << "Writing scan " << s << "\n";
// interleave m/z and intensity, then Base64-encode as 32 bit big endian
std::vector<Real> tmp;
for (Size i = 0; i < spec.size(); i++)
{
tmp.push_back(spec[i].getMZ());
tmp.push_back(spec[i].getIntensity());
}
String encoded;
decoder_.encode(tmp, Base64::BYTEORDER_BIGENDIAN, encoded);
os << encoded << "</peaks>\n";
}
else
{
os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\" xsi:nil=\"true\"/>\n";
}
writeUserParam_(os, spec, ms_level + 2);
if (spec.getComment() != "")
{
os << String(ms_level + 2, '\t') << "<comment>" << spec.getComment() << "</comment>\n";
}
//check MS level of next scan and close scans (scans can be nested)
UInt next_ms_level = 0;
if (s < cexp_->size() - 1)
{
next_ms_level = ((*cexp_)[s + 1]).getMSLevel();
}
//std::cout << "scan: " << s << " this: " << ms_level << " next: " << next_ms_level << "\n";
if (next_ms_level <= ms_level)
{
for (Size i = 0; i <= ms_level - next_ms_level && !open_scans.empty(); ++i)
{
os << String(ms_level - i + 1, '\t') << "</scan>\n";
open_scans.pop();
}
}
}
os << "\t</msRun>\n"
<< "\t<indexOffset>0</indexOffset>\n"
<< "</mzXML>\n";
logger_.endProgress();
// reset the spectrum write counter for a subsequent store operation
spec_write_counter_ = 1;
}
} // namespace Internal
} // namespace OpenMS
#endif
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
/*
  A fully configured resize filter: the filter/window function pair plus the
  pre-computed support, scaling and cubic coefficients needed to evaluate it.
*/
struct _ResizeFilter
{
double
(*filter)(const double,const ResizeFilter *), /* the filter function itself */
(*window)(const double,const ResizeFilter *), /* the windowing function */
support, /* filter region of support - the filter support limit */
window_support, /* window support, usually equal to support (expert only) */
scale, /* dimension scaling to fit window support (usually 1.0) */
blur, /* x-scale (blur-sharpen) */
coefficient[7]; /* cubic coefficients for BC-cubic filters */
ResizeWeightingFunctionType
filterWeightingType,
windowWeightingType;
size_t
signature; /* NOTE(review): presumably a structure validity marker - confirm against usage */
};
/*
Forward declarations.
*/
static double
I0(double x),
BesselOrderOne(double),
Sinc(const double, const ResizeFilter *),
SincFast(const double, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static double FilterName(const double x,const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static double Blackman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
      0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x).
    Using cos(2pi x) = 2 cos^2(pi x) - 1 this collapses to a single trig call
    and five flops: 0.34 + 0.5 c + 0.16 c^2 with c = cos(pi x), evaluated in
    Horner form below (refactoring by Chantal Racette and Nicolas Robidoux).
  */
  const double c = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+c*(0.5+c*0.16));
}
static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x)/pi.
    One trig call, one sqrt call and 7 flops: because Bohman's support is 1.0
    we know sin(pi x) >= 0 there, so the sine can be recovered from the cosine
    as sqrt(1 - cos^2) (refactoring by Nicolas Robidoux).
  */
  const double c = cos((double) (MagickPI*x));
  const double s = sqrt(1.0-c*c);
  magick_unreferenced(resize_filter);
  return((1.0-x)*c+(1.0/MagickPI)*s);
}
/* Box filter: constant weight 1.0 everywhere. */
static double Box(const double magick_unused(x),
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(x);
magick_unreferenced(resize_filter);
/*
A Box filter is a equal weighting function (all weights equal).
DO NOT LIMIT results by support or resize point sampling will work
as it requests points beyond its normal 0.0 support size.
*/
return(1.0);
}
/* Cosine windowing function: cos((pi/2) x). */
static double Cosine(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(resize_filter);
/*
Cosine window function:
cos((pi/2)*x).
*/
return(cos((double) (MagickPI2*x)));
}
/* Two-piece BC-family cubic filter evaluated from pre-computed coefficients. */
static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
/*
Cubic Filters using B,C determined values:
Mitchell-Netravali B = 1/3 C = 1/3 "Balanced" cubic spline filter
Catmull-Rom B = 0 C = 1/2 Interpolatory and exact on linears
Spline B = 1 C = 0 B-Spline Gaussian approximation
Hermite B = 0 C = 0 B-Spline interpolator
See paper by Mitchell and Netravali, Reconstruction Filters in Computer
Graphics Computer Graphics, Volume 22, Number 4, August 1988
http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
Mitchell.pdf.
Coefficients are determined from B,C values:
P0 = ( 6 - 2*B )/6 = coeff[0]
P1 = 0
P2 = (-18 +12*B + 6*C )/6 = coeff[1]
P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
Q0 = ( 8*B +24*C )/6 = coeff[3]
Q1 = ( -12*B -48*C )/6 = coeff[4]
Q2 = ( 6*B +30*C )/6 = coeff[5]
Q3 = ( - 1*B - 6*C )/6 = coeff[6]
which are used to define the filter:
P0 + P1*x + P2*x^2 + P3*x^3 0 <= x < 1
Q0 + Q1*x + Q2*x^2 + Q3*x^3 1 <= x < 2
which ensures function is continuous in value and derivative (slope).
*/
if (x < 1.0)
return(resize_filter->coefficient[0]+x*(x*
(resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
if (x < 2.0)
return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
(resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
return(0.0);
}
/*
Piecewise cubic spline approximation of sinc with 2, 3 or 4 lobes; the
variant used is selected by the configured filter support.
*/
static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
if (resize_filter->support <= 2.0)
{
/*
2-lobe Spline filter.
*/
if (x < 1.0)
return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
if (x < 2.0)
return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
return(0.0);
}
if (resize_filter->support <= 3.0)
{
/*
3-lobe Spline filter.
*/
if (x < 1.0)
return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
if (x < 2.0)
return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
if (x < 3.0)
return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
return(0.0);
}
/*
4-lobe Spline filter.
*/
if (x < 1.0)
return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
if (x < 2.0)
return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
if (x < 3.0)
return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
if (x < 4.0)
return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
return(0.0);
}
static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
/*
Gaussian with a sigma = 1/2 (or as user specified)
Gaussian Formula (1D) ...
exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))
Gaussian Formula (2D) ...
exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
or for radius
exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
Note that it is only a change from 1-d to radial form is in the
normalization multiplier which is not needed or used when Gaussian is used
as a filter.
The constants are pre-calculated...
coeff[0]=sigma;
coeff[1]=1.0/(2.0*sigma^2);
coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
exp( -coeff[1]*(x^2)) ) * coeff[2];
However only the multiplier coeff[1] is needed; the others are informative only.
This separates the gaussian 'sigma' value from the 'blur/support'
settings allowing for its use in special 'small sigma' gaussians,
without the filter 'missing' pixels because the support becomes too
small.
*/
return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}
static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann: raised-cosine window function:
      0.5 + 0.5 cos(pi x).
  */
  const double c = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*c);
}
static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming: offset-cosine window function:
      0.54 + 0.46 cos(pi x).
  */
  const double c = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*c);
}
static double Jinc(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(resize_filter);
/*
See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
http://mathworld.wolfram.com/JincFunction.html and page 11 of
http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf
The original "zoom" program by Paul Heckbert called this "Bessel". But
really it is more accurately named "Jinc".
*/
/* limit value at the origin: jinc(0) = pi/2 */
if (x == 0.0)
return(0.5*MagickPI);
return(BesselOrderOne(MagickPI*x)/x);
}
static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
/*
Kaiser Windowing Function (bessel windowing)
I0( beta * sqrt( 1-x^2) ) / IO(0)
Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
However it is typically defined in terms of Alpha*PI
The normalization factor (coeff[1]) is not actually needed,
but without it the filter has a large value at x=0 making it
difficult to compare the function with other windowing functions.
*/
return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
sqrt((double) (1.0-x*x))));
}
static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    value;

  register ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc: N is the 'order' of the lagrange
    function and depends on the overall support window size of the filter. That
    is: for a support of 2, it gives a lagrange-4 (piecewise cubic function).
    "n" identifies the piece of the piecewise polynomial.
    See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
    Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);  /* which piece contains x */
  value=1.0;  /* was 1.0f: the accumulator is a double, use a double literal */
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);  /* product of (x-x_i)/(x_n-x_i) basis terms */
  return(value);
}
static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of a Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    {
      const double d = x-1.5;
      return(0.5*d*d);
    }
  return(0.0);
}
static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi x)/(pi x), with sinc(0) == 1.
  */
  if (x == 0.0)
    return((double) 1.0);
  {
    const double pix=(double) (MagickPI*x);
    return(sin((double) pix)/pix);
  }
}
static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig formula.
  */
  if (x > 4.0)
    {
      /* Outside the fitted interval: fall back to the exact trig form. */
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    /* Horner evaluation of the polynomial in x^2. */
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    /* The (xx-k^2) factors pin the zeros of sinc at x = 1,2,3,4 exactly. */
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    /* Highest precision uses a rational approximation p/q. */
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}
static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett Windowing function
    for Sinc().
  */
  return(x < 1.0 ? 1.0-x : 0.0);
}
static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    Welch parabolic windowing filter: 1-x^2 inside the unit support,
    zero outside.
  */
  return(x < 1.0 ? 1.0-x*x : 0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
% The special 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for image distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RobidouxSharp is a slightly sharper version of Robidoux, some believe it
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly Mitchell falls roughly between Robidoux and
% RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisable. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
% causes the windowing (or self-windowing Lagrange filter) to act as if
% the support window is much larger than what is actually supplied to
% the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
% If only one of these is given it is assumed to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 8 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterType filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterType filter,const MagickBooleanType cylindrical,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterType
    filter_type,
    window_type;

  double
    B,
    C,
    value;

  register ResizeFilter
    *resize_filter;

  /*
    Table Mapping given Filter, into Weighting and Windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.
    An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
    "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
    specifically requested by the user.

    WARNING: The order of this table must match the order of the FilterType
    enumeration specified in "resample.h", or the filter names will not match
    the filter being setup.

    You can check filter setups with the "filter:verbose" expert setting.
  */
  static struct
  {
    FilterType
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter, BoxFilter },          /* Undefined (default to Box)  */
    { PointFilter, BoxFilter },              /* SPECIAL: Nearest neighbour  */
    { BoxFilter, BoxFilter },                /* Box averaging filter        */
    { TriangleFilter, BoxFilter },           /* Linear interpolation filter */
    { HermiteFilter, BoxFilter },            /* Hermite interpolation filter*/
    { SincFastFilter, HannFilter },          /* Hann -- cosine-sinc         */
    { SincFastFilter, HammingFilter },       /* Hamming --      '' variation*/
    { SincFastFilter, BlackmanFilter },      /* Blackman -- 2*cosine-sinc   */
    { GaussianFilter, BoxFilter },           /* Gaussian blur filter        */
    { QuadraticFilter, BoxFilter },          /* Quadratic Gaussian approx   */
    { CubicFilter, BoxFilter },              /* General Cubic Filter, Spline*/
    { CatromFilter, BoxFilter },             /* Cubic-Keys interpolator     */
    { MitchellFilter, BoxFilter },           /* 'Ideal' Cubic-Keys filter   */
    { JincFilter, BoxFilter },               /* Raw 3-lobed Jinc function   */
    { SincFilter, BoxFilter },               /* Raw 4-lobed Sinc function   */
    { SincFastFilter, BoxFilter },           /* Raw fast sinc ("Pade"-type) */
    { SincFastFilter, KaiserFilter },        /* Kaiser -- square root-sinc  */
    { LanczosFilter, WelchFilter },          /* Welch -- parabolic (3 lobe) */
    { SincFastFilter, CubicFilter },         /* Parzen -- cubic-sinc        */
    { SincFastFilter, BohmanFilter },        /* Bohman -- 2*cosine-sinc     */
    { SincFastFilter, TriangleFilter },      /* Bartlett -- triangle-sinc   */
    { LagrangeFilter, BoxFilter },           /* Lagrange self-windowing     */
    { LanczosFilter, LanczosFilter },        /* Lanczos Sinc-Sinc filters   */
    { LanczosSharpFilter, LanczosSharpFilter }, /* | these require          */
    { Lanczos2Filter, Lanczos2Filter },      /* | special handling          */
    { Lanczos2SharpFilter, Lanczos2SharpFilter },
    { RobidouxFilter, BoxFilter },           /* Cubic Keys tuned for EWA    */
    { RobidouxSharpFilter, BoxFilter },      /* Sharper Cubic Keys for EWA  */
    { LanczosFilter, CosineFilter },         /* Cosine window (3 lobes)     */
    { SplineFilter, BoxFilter },             /* Spline Cubic Filter         */
    { LanczosRadiusFilter, LanczosFilter },  /* Lanczos with integer radius */
    { CubicSplineFilter, BoxFilter },        /* CubicSpline (2/3/4 lobes)   */
  };
  /*
    Table mapping the filter/window from the above table to an actual function.
    The default support size for that filter as a weighting function, the range
    to scale with to use that function as a sinc windowing function, (typ 1.0).

    Note that the filter_type -> function is 1 to 1 except for Sinc(),
    SincFast(), and CubicBC() functions, which may have multiple filter to
    function associations.

    See "filter:verbose" handling below for the function -> filter mapping.
  */
  static struct
  {
    double
      (*function)(const double,const ResizeFilter*),
      support,  /* Default lobes/support size of the weighting filter. */
      scale,    /* Support when function used as a windowing function
                   Typically equal to the location of the first zero crossing. */
      B,C;      /* BC-spline coefficients, ignored if not a CubicBC filter. */
    ResizeWeightingFunctionType weightingFunctionType;
  } const filters[SentinelFilter] =
  {
    /*            .--- support window (if used as a Weighting Function)
                  |    .--- first crossing (if used as a Windowing Function)
                  |    |    .--- B value for Cubic Function
                  |    |    |    .---- C value for Cubic Function
                  |    |    |    |                                    */
    { Box,       0.5, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Undefined (default to Box)  */
    { Box,       0.0, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Point (special handling)    */
    { Box,       0.5, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Box                         */
    { Triangle,  1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle                    */
    { CubicBC,   1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction },  /* Hermite (cubic  B=C=0)      */
    { Hann,      1.0, 1.0, 0.0, 0.0, HannWeightingFunction },     /* Hann, cosine window         */
    { Hamming,   1.0, 1.0, 0.0, 0.0, HammingWeightingFunction },  /* Hamming, '' variation       */
    { Blackman,  1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window   */
    { Gaussian,  2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian                    */
    { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian          */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* General Cubic Filter        */
    { CubicBC,   2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction },  /* Catmull-Rom    (B=0,C=1/2)  */
    { CubicBC,   2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell   (B=C=1/3)    */
    { Jinc,      3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
    { Sinc,      4.0, 1.0, 0.0, 0.0, SincWeightingFunction },     /* Raw 4-lobed Sinc            */
    { SincFast,  4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
    { Kaiser,    1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction },   /* Kaiser (square root window) */
    { Welch,     1.0, 1.0, 0.0, 0.0, WelchWeightingFunction },    /* Welch (parabolic window)    */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* Parzen (B-Spline window)    */
    { Bohman,    1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction },   /* Bohman, 2*Cosine window     */
    { Triangle,  1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window)  */
    { Lagrange,  2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc  */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened          */
    { SincFast,  2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed            */
    { SincFast,  2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened         */
    /* Robidoux: Keys cubic close to Lanczos2D sharpened */
    { CubicBC,   2.0, 1.1685777620836932,
                            0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
    /* RobidouxSharp: Sharper version of Robidoux */
    { CubicBC,   2.0, 1.105822933719019,
                            0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
    { Cosine,    1.0, 1.0, 0.0, 0.0, CosineWeightingFunction },   /* Low level cosine window     */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* Cubic B-Spline (B=1,C=0)    */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Integer Radius     */
    { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction },     /* Spline Lobes 2-lobed        */
  };
  /*
    The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
    function being used as a filter. It is used by the "filter:lobes" expert
    setting and for 'lobes' for Jinc functions in the previous table. This way
    users do not have to deal with the highly irrational lobe sizes of the Jinc
    filter.

    Values taken from
    http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    using Jv-function with v=1, then dividing by PI.
  */
  static double
    jinc_zeros[16] =
    {
      1.2196698912665045,
      2.2331305943815286,
      3.2383154841662362,
      4.2410628637960699,
      5.2427643768701817,
      6.2439216898644877,
      7.2447598687199570,
      8.2453949139520427,
      9.2458926849494673,
      10.246293348754916,
      11.246622794877883,
      12.246898461138105,
      13.247132522181061,
      14.247333735806849,
      15.247508563037300,
      16.247661874700962
   };

  /*
    Allocate resize filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) exception;
  resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter));
  (void) memset(resize_filter,0,sizeof(*resize_filter));
  /*
    Defaults for the requested filter.
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  resize_filter->blur=1.0;
  /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
  if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) &&
      (filter != SincFastFilter))
    filter_type=JincFilter;  /* 1D Windowed Sinc => 2D Windowed Jinc filters */

  /* Expert filter setting override */
  artifact=GetImageArtifact(image,"filter:filter");
  if (IsStringTrue(artifact) != MagickFalse)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
      if ((UndefinedFilter < option) && (option < SentinelFilter))
        { /* Raw filter request - no window function. */
          filter_type=(FilterType) option;
          window_type=BoxFilter;
        }
      /* Filter override with a specific window function. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            window_type=(FilterType) option;
        }
    }
  else
    {
      /* Window specified, but no filter function?  Assume Sinc/Jinc. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          ssize_t
            option;

          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            {
              filter_type= cylindrical != MagickFalse ? JincFilter
                                                     : SincFastFilter;
              window_type=(FilterType) option;
            }
        }
    }
  /* Assign the real functions to use for the filters selected. */
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].support;
  resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
  resize_filter->window=filters[window_type].function;
  resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickCoreSignature;

  /* Filter Modifications for orthogonal/cylindrical usage */
  if (cylindrical != MagickFalse)
    switch (filter_type)
    {
      case BoxFilter:
        /* Support for Cylindrical Box should be sqrt(2)/2 */
        resize_filter->support=(double) MagickSQ1_2;
        break;
      case LanczosFilter:
      case LanczosSharpFilter:
      case Lanczos2Filter:
      case Lanczos2SharpFilter:
      case LanczosRadiusFilter:
        /* Swap the Sinc-based weighting/window for their Jinc equivalents. */
        resize_filter->filter=filters[JincFilter].function;
        resize_filter->window=filters[JincFilter].function;
        resize_filter->scale=filters[JincFilter].scale;
        /* number of lobes (support window size) remain unchanged */
        break;
      default:
        break;
    }
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
  switch (filter_type)
  {
    case LanczosSharpFilter:
      resize_filter->blur *= 0.9812505644269356;
      break;
    case Lanczos2SharpFilter:
      resize_filter->blur *= 0.9549963639785485;
      break;
    /* case LanczosRadius:  blur adjust is done after lobes */
    default:
      break;
  }

  /*
    Expert Option Modifications.
  */

  /* User Gaussian Sigma Override - no support change */
  if ((resize_filter->filter == Gaussian) ||
      (resize_filter->window == Gaussian) ) {
    value=0.5;    /* gaussian sigma default, half pixel */
    artifact=GetImageArtifact(image,"filter:sigma");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    /* Define coefficients for Gaussian */
    resize_filter->coefficient[0]=value;                 /* note sigma too */
    resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
    resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
       /* normalization - not actually needed or used! */
    if ( value > 0.5 )
      resize_filter->support *= 2*value;  /* increase support linearly */
  }

  /* User Kaiser Alpha Override - no support change */
  if ((resize_filter->filter == Kaiser) ||
      (resize_filter->window == Kaiser) ) {
    value=6.5; /* default beta value for Kaiser bessel windowing function */
    artifact=GetImageArtifact(image,"filter:alpha");  /* FUTURE: deprecate */
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-beta");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-alpha");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL)*MagickPI;
    /* Define coefficients for Kaiser Windowing Function */
    resize_filter->coefficient[0]=value;         /* alpha */
    resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
       /* normalization */
  }

  /* Support Overrides */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        lobes;

      lobes=(ssize_t) StringToLong(artifact);
      if (lobes < 1)
        lobes=1;
      resize_filter->support=(double) lobes;
    }
  if (resize_filter->filter == Jinc)
    {
      /*
        Convert a Jinc function lobes value to a real support value.
      */
      if (resize_filter->support > 16)
        resize_filter->support=jinc_zeros[15];  /* largest entry in table */
      else
        resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
      /*
        Blur this filter so support is an integer value (lobes dependent).
      */
      if (filter_type == LanczosRadiusFilter)
        resize_filter->blur*=floor(resize_filter->support)/
          resize_filter->support;
    }
  /*
    Expert blur override.
  */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(double) MagickEpsilon;
  /*
    Expert override of the support setting.
  */
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Scale windowing function separately to the support 'clipping' window
    that calling operator is planning to actually use. (Expert override)
  */
  resize_filter->window_support=resize_filter->support; /* default */
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Adjust window function scaling to match windowing support for weighting
    function.  This avoids a division on every filter call.
  */
  resize_filter->scale/=resize_filter->window_support;
  /*
   * Set Cubic Spline B,C values, calculate Cubic coefficients.
  */
  B=0.0;
  C=0.0;
  if ((resize_filter->filter == CubicBC) ||
      (resize_filter->window == CubicBC) )
    {
      B=filters[filter_type].B;
      C=filters[filter_type].C;
      if (filters[window_type].function == CubicBC)
        {
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=StringToDouble(artifact,(char **) NULL);
          C=(1.0-B)/2.0;  /* Calculate C to get a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c");  /* user C override */
          if (artifact != (const char *) NULL)
            C=StringToDouble(artifact,(char **) NULL);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=StringToDouble(artifact,(char **) NULL);
              B=1.0-2.0*C;  /* Calculate B to get a Keys cubic filter. */
            }
        }
      {
        const double
          twoB = B+B;

        /*
          Convert B,C values into Cubic Coefficients.  See CubicBC().
        */
        resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
        resize_filter->coefficient[1]=-3.0+twoB+C;
        resize_filter->coefficient[2]=2.0-1.5*B-C;
        resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
        resize_filter->coefficient[4]=-8.0*C-twoB;
        resize_filter->coefficient[5]=B+5.0*C;
        resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
      }
    }

  /*
    Expert Option Request for verbose details of the resulting filter.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp master
  {
#endif
    if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
      {
        double
          support,
          x;

        /*
          Set the weighting function properly when the weighting function
          may not exactly match the filter of the same name.  EG: a Point
          filter really uses a Box weighting function with a different
          support than is typically used.
        */
        if (resize_filter->filter == Box)       filter_type=BoxFilter;
        if (resize_filter->filter == Sinc)      filter_type=SincFilter;
        if (resize_filter->filter == SincFast)  filter_type=SincFastFilter;
        if (resize_filter->filter == Jinc)      filter_type=JincFilter;
        if (resize_filter->filter == CubicBC)   filter_type=CubicFilter;
        if (resize_filter->window == Box)       window_type=BoxFilter;
        if (resize_filter->window == Sinc)      window_type=SincFilter;
        if (resize_filter->window == SincFast)  window_type=SincFastFilter;
        if (resize_filter->window == Jinc)      window_type=JincFilter;
        if (resize_filter->window == CubicBC)   window_type=CubicFilter;
        /*
          Report Filter Details.
        */
        support=GetResizeFilterSupport(resize_filter);  /* practical_support */
        (void) FormatLocaleFile(stdout,
          "# Resampling Filter (for graphing)\n#\n");
        (void) FormatLocaleFile(stdout,"# filter = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,filter_type));
        (void) FormatLocaleFile(stdout,"# window = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,window_type));
        (void) FormatLocaleFile(stdout,"# support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->support);
        (void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->window_support);
        (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->blur);
        if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
          (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
            GetMagickPrecision(),(double) resize_filter->coefficient[0]);
        if ( filter_type == KaiserFilter || window_type == KaiserFilter )
          (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
            GetMagickPrecision(),(double) resize_filter->coefficient[0]);
        (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
          GetMagickPrecision(), (double) support);
        if ((filter_type == CubicFilter) || (window_type == CubicFilter))
          (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
            GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C);
        (void) FormatLocaleFile(stdout,"\n");
        /*
          Output values of resulting filter graph -- for graphing filter result.
        */
        for (x=0.0; x <= support; x+=0.01f)
          (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
            GetMagickPrecision(),(double)
            GetResizeFilterWeight(resize_filter,x));
        /*
          A final value so gnuplot can graph the 'stop' properly.
        */
        (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
          GetMagickPrecision(),0.0);
      }
    /* Output the above once only for each image - remove setting */
    (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% This is a shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing of images a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Thin convenience wrapper: delegate directly to the interpolative
    resize using mesh pixel interpolation.
  */
  return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 1. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%    j1(x) = x*J1(x), where J1(x) is a rational approximation of j1(x)/x;
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% double BesselOrderOne(double x)
%
% A description of each parameter follows:
%
% o x: double value.
%
*/
#undef I0
static double I0(double x)
{
  /*
    Modified Bessel function of the first kind, order 0: sum the series
    sum_k (x^2/4)^k/(k!)^2 until the next term drops below MagickEpsilon.
  */
  double
    sum,
    term,
    y;

  ssize_t
    k;

  y=x*x/4.0;
  sum=1.0;
  term=y;
  for (k=2; term > MagickEpsilon; k++)
  {
    sum+=term;
    term*=y/((double) k*k);
  }
  return(sum);
}
#undef J1
static double J1(double x)
{
  /*
    Rational (Horner) approximation of j1(x)/x for x in (0,8]; the caller
    multiplies by x to recover j1(x).
  */
  double
    denominator,
    numerator;

  ssize_t
    i;

  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  numerator=Pone[8];
  denominator=Qone[8];
  for (i=7; i >= 0; i--)
  {
    numerator=numerator*x*x+Pone[i];
    denominator=denominator*x*x+Qone[i];
  }
  return(numerator/denominator);
}
#undef P1
static double P1(double x)
{
  /*
    Horner evaluation of the asymptotic amplitude term P1 in the large-x
    expansion of j1(x), valid for x >= 8; expansion variable is (8/x)^2.
    P1(x) -> 1 as x -> infinity.
  */
  double
    denominator,
    numerator,
    z;

  ssize_t
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  z=8.0/x;
  numerator=Pone[5];
  denominator=Qone[5];
  for (i=4; i >= 0; i--)
  {
    numerator=numerator*z*z+Pone[i];
    denominator=denominator*z*z+Qone[i];
  }
  return(numerator/denominator);
}
#undef Q1
static double Q1(double x)
{
  /*
    Horner evaluation of the asymptotic phase term Q1 in the large-x
    expansion of j1(x), valid for x >= 8; expansion variable is (8/x)^2.
    The caller scales by 8/x, so Q1(x) -> 3/64 as x -> infinity.
  */
  double
    denominator,
    numerator,
    z;

  ssize_t
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  z=8.0/x;
  numerator=Pone[5];
  denominator=Qone[5];
  for (i=4; i >= 0; i--)
  {
    numerator=numerator*z*z+Pone[i];
    denominator=denominator*z*z+Qone[i];
  }
  return(numerator/denominator);
}
static double BesselOrderOne(double x)
{
  /*
    Bessel function of the first kind, order 1.  Evaluate on |x| — via the
    rational approximation J1() for |x| < 8, otherwise via the asymptotic
    expansion with P1()/Q1() — then restore the sign (j1(-x) == -j1(x)).
  */
  double
    magnitude,
    value;

  if (x == 0.0)
    return(0.0);
  magnitude=fabs(x);
  if (magnitude < 8.0)
    return(x*J1(magnitude));
  value=sqrt((double) (2.0/(MagickPI*magnitude)))*(P1(magnitude)*
    (1.0/sqrt(2.0)*(sin(magnitude)-cos(magnitude)))-8.0/magnitude*
    Q1(magnitude)*(-1.0/sqrt(2.0)*(sin(magnitude)+cos(magnitude))));
  if (x < 0.0)
    value=(-value);
  return(value);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroys the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Invalidate the signature before releasing the filter memory so stale
    pointers fail the signature assertions; always returns NULL.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  resize_filter->signature=(~MagickCoreSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() returns the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
/* Return the filter's expert-option coefficient array (not a copy). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}

/* Return the filter's blur factor (1.0 = default sharpness). */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Return the window scaling factor applied to x before windowing. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Return the support of the windowing function (unscaled by blur). */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Return the weighting function type of the core filter. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Return the weighting function type of the windowing function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

/* Return the effective filter support, enlarged by the blur factor. */
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usually lies between zero and the filters current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% double GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const double x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  /*
    Evaluate the filter at offset x: the core weighting function scaled by
    the windowing function.  For Point/Box windows (or a near-zero window
    support) the window contributes a constant 1.0, avoiding a division by
    zero in the window scaling.
  */
  double
    blurred_x,
    window_weight;

  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  blurred_x=fabs((double) x)/resize_filter->blur;  /* X offset with blur scaling */
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    window_weight=1.0;
  else
    window_weight=resize_filter->window(blurred_x*resize_filter->scale,
      resize_filter);
  return(window_weight*resize_filter->filter(blurred_x,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image: each destination pixel is produced by
    interpolating the source at the back-mapped pixel center.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /*
    Scale factors that map destination pixel centers back into the source.
  */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      MagickBooleanType
        interpolate;

      register ssize_t
        i;

      /*
        InterpolatePixelChannels() resolves every channel of the pixel in a
        single call, so invoke it once per destination pixel — not once per
        channel as before (which redundantly re-interpolated the whole pixel
        GetPixelChannels() times).  Skip the pixel only when no channel is
        defined in both source and destination.
      */
      interpolate=MagickFalse;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits != UndefinedPixelTrait) &&
            (resize_traits != UndefinedPixelTrait))
          {
            interpolate=MagickTrue;
            break;
          }
      }
      if (interpolate != MagickFalse)
        {
          /* Loop-invariant per x; hoisted out of the channel scan. */
          offset.x=((double) x+0.5)*scale.x-0.5;
          status=InterpolatePixelChannels(image,image_view,resize_image,method,
            offset.x,offset.y,q,exception);
        }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          Report against the destination row count: the loop iterates
          resize_image->rows times, not image->rows.
        */
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress,
          resize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,const size_t columns,
% const size_t rows,const double delta_x,const double rigidity,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,    /* per-pixel channel data handed back by the carver scan */
    *pixels;    /* float copy of the source image for liblqr */

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  register gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* Seam carving needs at least a 3x3 image; fall back to a normal resize. */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /*
    Pack the source pixels into a normalized float buffer for liblqr
    (over-allocated to MaxPixelChannels per pixel).
  */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Carver must not mutate our buffer; we free it ourselves below. */
  lqr_carver_set_preserve_input_image(carver);
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;  /* carve errors are deliberately best-effort */
  /*
    The carver may not deliver exactly columns x rows; size the result from
    what it actually produced.
  */
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Scan the carved result pixel-by-pixel back into the destination image;
    packet points at carver-owned channel data for (x_offset,y_offset).
  */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    register Quantum
      *magick_restrict p;

    register ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  /*
    Stub built when the Liquid Rescale (lqr) delegate is unavailable:
    validate arguments, then report the missing delegate and return NULL.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void CopyPixels(const Quantum *source,const ssize_t source_offset,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  /*
    Copy one pixel (channels quantums) from slot source_offset of source to
    slot destination_offset of destination.
  */
  const Quantum
    *p;

  Quantum
    *q;

  ssize_t
    i;

  p=source+source_offset*channels;
  q=destination+destination_offset*channels;
  for (i=0; i < (ssize_t) channels; i++)
    *q++=(*p++);
}
static inline void MixPixels(const Quantum *source,const ssize_t *source_offset,
  const size_t source_size,Quantum *destination,
  const ssize_t destination_offset,const size_t channels)
{
  /*
    Store, channel by channel, the integer mean of the source_size source
    pixels selected by source_offset into destination slot
    destination_offset.
  */
  ssize_t
    i,
    j;

  for (i=0; i < (ssize_t) channels; i++)
  {
    ssize_t
      total;

    total=0;
    for (j=0; j < (ssize_t) source_size; j++)
      total+=source[source_offset[j]*channels+i];
    destination[channels*destination_offset+i]=(Quantum) (total/source_size);
  }
}
/* Average exactly two source pixels into the given destination slot. */
static inline void Mix2Pixels(const Quantum *source,
  const ssize_t source_offset1,const ssize_t source_offset2,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  const ssize_t
    offsets[2] = { source_offset1, source_offset2 };

  MixPixels(source,offsets,2,destination,destination_offset,channels);
}
static inline int PixelsEqual(const Quantum *source1,ssize_t offset1,
  const Quantum *source2,ssize_t offset2,const size_t channels)
{
  /*
    Return 1 when the two pixels agree on every channel, 0 otherwise.
  */
  const Quantum
    *p,
    *q;

  ssize_t
    i;

  p=source1+offset1*(ssize_t) channels;
  q=source2+offset2*(ssize_t) channels;
  for (i=0; i < (ssize_t) channels; i++)
    if (p[i] != q[i])
      return(0);
  return(1);
}
/*
  Eagle 2x pixel-art scaler.  pixels is a 3x3 source neighborhood in
  row-major order (center at index 4); result receives the 2x2 output
  block.  Each output corner takes the diagonal neighbor's color when the
  two adjacent edge neighbors agree with it; otherwise it keeps the center.
*/
static inline void Eagle2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    i;

  (void) source;
  /* Default all four outputs to the center pixel. */
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (PixelsEqual(pixels,0,pixels,1,channels) &&
      PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,0,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,2,channels) &&
      PixelsEqual(pixels,2,pixels,5,channels))
    CopyPixels(pixels,2,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,6,channels) &&
      PixelsEqual(pixels,6,pixels,7,channels))
    CopyPixels(pixels,6,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,8,channels) &&
      PixelsEqual(pixels,8,pixels,7,channels))
    CopyPixels(pixels,8,result,3,channels);
}
/*
  Apply one hq2x blending rule (selected from Hq2XTable) to produce a single
  output pixel.  e is the center of the 3x3 neighborhood; a,b,d,f,h are the
  neighbor offsets, already rotated by the caller so the same rule table
  serves all four output corners.  Rules mix 4, 8, or 16 weighted source
  samples (repetition of an offset in the list acts as its weight).
*/
static void Hq2XHelper(const unsigned int rule,const Quantum *source,
  Quantum *destination,const ssize_t destination_offset,const size_t channels,
  const ssize_t e,const ssize_t a,const ssize_t b,const ssize_t d,
  const ssize_t f,const ssize_t h)
{
#define caseA(N,A,B,C,D) \
  case N: \
  { \
    const ssize_t \
      offsets[4] = { A, B, C, D }; \
\
    MixPixels(source,offsets,4,destination,destination_offset,channels);\
    break; \
  }
#define caseB(N,A,B,C,D,E,F,G,H) \
  case N: \
  { \
    const ssize_t \
      offsets[8] = { A, B, C, D, E, F, G, H }; \
\
    MixPixels(source,offsets,8,destination,destination_offset,channels);\
    break; \
  }

  switch (rule)
  {
    case 0:
    {
      /* Rule 0: keep the center pixel unchanged. */
      CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    caseA(1,e,e,e,a)
    caseA(2,e,e,e,d)
    caseA(3,e,e,e,b)
    caseA(4,e,e,d,b)
    caseA(5,e,e,a,b)
    caseA(6,e,e,a,d)
    caseB(7,e,e,e,e,e,b,b,d)
    caseB(8,e,e,e,e,e,d,d,b)
    caseB(9,e,e,e,e,e,e,d,b)
    caseB(10,e,e,d,d,d,b,b,b)
    case 11:
    {
      const ssize_t
        offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

      MixPixels(source,offsets,16,destination,destination_offset,channels);
      break;
    }
    /* Rules 12-18 blend only when the b/d (or b/f, d/h) neighbors match. */
    case 12:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 13:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 14:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,16,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 15:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 16:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 17:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 18:
    {
      if (PixelsEqual(source,b,source,f,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, b, b, d };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, d };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    default:
    {
      /* Rule 19. */
      if (PixelsEqual(source,d,source,h,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, d, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
  }
#undef caseA
#undef caseB
}
static inline unsigned int Hq2XPatternToNumber(const int *pattern)
{
  /*
    Pack the eight pattern flags into an integer in Horner form; pattern[0]
    becomes the most significant bit (equivalent to the original
    sum-of-powers-of-two formulation).
  */
  ssize_t
    i;

  unsigned int
    result;

  result=0;
  for (i=0; i < 8; i++)
    result=2*result+(unsigned int) pattern[i];
  return(result);
}
/*
  hq2x pixel-art scaler.  The 8-bit neighborhood-difference pattern around
  the center pixel (index 4 of the 3x3 block) indexes Hq2XTable to select a
  blending rule for each of the four output pixels; the pattern is rotated
  so the single table serves every corner.
*/
static inline void Hq2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /* Rule number for each of the 256 possible difference patterns. */
  static const unsigned int
    Hq2XTable[] =
    {
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 15, 12, 5, 3, 17, 13,
      4, 4, 6, 18, 4, 4, 6, 18, 5, 3, 12, 12, 5, 3, 1, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 17, 13, 5, 3, 16, 14,
      4, 4, 6, 18, 4, 4, 6, 18, 5, 3, 16, 12, 5, 3, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 19, 12, 12, 5, 19, 16, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 16, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 19, 1, 12, 5, 19, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 18, 5, 3, 16, 12, 5, 19, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 15, 12, 5, 3, 17, 13,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 16, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 17, 13, 5, 3, 16, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 13, 5, 3, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 16, 13,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 1, 12,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 16, 12, 5, 3, 1, 14,
      4, 4, 6, 2, 4, 4, 6, 2, 5, 3, 1, 12, 5, 3, 1, 14
    };

  /* Flag, per neighbor, whether it differs from the center pixel. */
  const int
    pattern1[] =
    {
      !PixelsEqual(pixels,4,pixels,8,channels),
      !PixelsEqual(pixels,4,pixels,7,channels),
      !PixelsEqual(pixels,4,pixels,6,channels),
      !PixelsEqual(pixels,4,pixels,5,channels),
      !PixelsEqual(pixels,4,pixels,3,channels),
      !PixelsEqual(pixels,4,pixels,2,channels),
      !PixelsEqual(pixels,4,pixels,1,channels),
      !PixelsEqual(pixels,4,pixels,0,channels)
    };

  /* 90-degree rotation of the difference pattern. */
#define Rotated(p) p[2], p[4], p[7], p[1], p[6], p[0], p[3], p[5]
  const int pattern2[] = { Rotated(pattern1) };
  const int pattern3[] = { Rotated(pattern2) };
  const int pattern4[] = { Rotated(pattern3) };
#undef Rotated
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern1)],pixels,result,0,
    channels,4,0,1,3,5,7);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern2)],pixels,result,1,
    channels,4,2,5,1,7,3);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern3)],pixels,result,3,
    channels,4,8,7,5,3,1);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern4)],pixels,result,2,
    channels,4,6,3,7,1,5);
}
/*
  Fish 2x pixel-art scaler.  Works on the top-left 2x2 quad {0,1,3,4} of the
  3x3 neighborhood: result[0..2] are chosen by pixel equality/intensity
  comparisons, and result[3] (the interpolated corner) is selected through
  the corner/line cases below.
*/
static void Fish2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  /* Blend a convex corner; the brighter of A/B decides the triple used. */
#define Corner(A,B,C,D) \
{ \
  if (intensities[B] > intensities[A]) \
    { \
      ssize_t \
        offsets[3] = { B, C, D }; \
\
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
  else \
    { \
      ssize_t \
        offsets[3] = { A, B, C }; \
\
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
}
  /* Blend along an edge; the brighter end decides which pair is averaged. */
#define Line(A,B,C,D) \
{ \
  if (intensities[C] > intensities[A]) \
    Mix2Pixels(pixels,C,D,result,3,channels); \
  else \
    Mix2Pixels(pixels,A,B,result,3,channels); \
}

  MagickFloatType
    intensities[9];

  int
    ae,
    bd,
    ab,
    ad,
    be,
    de;

  register ssize_t
    i;

  ssize_t
    offsets[4] = { 0, 1, 3, 4 };

  for (i=0; i < 9; i++)
    intensities[i]=GetPixelIntensity(source,pixels + i*channels);
  /* result[0] is always the top-left source pixel. */
  CopyPixels(pixels,0,result,0,channels);
  /* result[1]/result[2]: keep the darker of the two adjacent pixels. */
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[1] ? 0 : 1),result,
    1,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[3] ? 0 : 3),result,
    2,channels);
  /* Pairwise equality flags within the quad (a=0, b=1, d=3, e=4). */
  ae=PixelsEqual(pixels,0,pixels,4,channels);
  bd=PixelsEqual(pixels,1,pixels,3,channels);
  ab=PixelsEqual(pixels,0,pixels,1,channels);
  de=PixelsEqual(pixels,3,pixels,4,channels);
  ad=PixelsEqual(pixels,0,pixels,3,channels);
  be=PixelsEqual(pixels,1,pixels,4,channels);
  if (ae && bd && ab)
    {
      /* Uniform quad: propagate the corner color. */
      CopyPixels(pixels,0,result,3,channels);
      return;
    }
  if (ad && de && !ab)
    {
      Corner(1,0,4,3)
      return;
    }
  if (be && de && !ab)
    {
      Corner(0,1,3,4)
      return;
    }
  if (ad && ab && !be)
    {
      Corner(4,3,1,0)
      return;
    }
  if (ab && be && !ad)
    {
      Corner(3,0,4,1)
      return;
    }
  /* Diagonal matches: average along the dominant diagonal. */
  if (ae && (!bd || intensities[1] > intensities[0]))
    {
      Mix2Pixels(pixels,0,4,result,3,channels);
      return;
    }
  if (bd && (!ae || intensities[0] > intensities[1]))
    {
      Mix2Pixels(pixels,1,3,result,3,channels);
      return;
    }
  if (ab)
    {
      Line(0,1,3,4)
      return;
    }
  if (de)
    {
      Line(3,4,0,1)
      return;
    }
  if (ad)
    {
      Line(0,3,1,4)
      return;
    }
  if (be)
    {
      Line(1,4,0,3)
      return;
    }
  /* No structure detected: average the whole quad. */
  MixPixels(pixels,offsets,4,result,3,channels);
#undef Corner
#undef Line
}
/*
  xBR 2x pixel-art scaler.  pixels is a 5x5 neighborhood in row-major order
  with the center at index 12.  For each output corner, two weighted sums of
  neighborhood differences compare an "edge present" hypothesis against its
  perpendicular; when an edge wins, the center is blended with the closer of
  the two edge neighbors, otherwise the center is copied unchanged.
*/
static void Xbr2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  /* w_M_N is 0 when pixels M and N match, 1 when they differ. */
#define WeightVar(M,N) const int w_##M##_##N = \
  PixelsEqual(pixels,M,pixels,N,channels) ? 0 : 1;

  WeightVar(12,11)
  WeightVar(12,7)
  WeightVar(12,13)
  WeightVar(12,17)
  WeightVar(12,16)
  WeightVar(12,8)
  WeightVar(6,10)
  WeightVar(6,2)
  WeightVar(11,7)
  WeightVar(11,17)
  WeightVar(11,5)
  WeightVar(7,13)
  WeightVar(7,1)
  WeightVar(12,6)
  WeightVar(12,18)
  WeightVar(8,14)
  WeightVar(8,2)
  WeightVar(13,17)
  WeightVar(13,9)
  WeightVar(7,3)
  WeightVar(16,10)
  WeightVar(16,22)
  WeightVar(17,21)
  WeightVar(11,15)
  WeightVar(18,14)
  WeightVar(18,22)
  WeightVar(17,23)
  WeightVar(17,19)
#undef WeightVar
  /* Top-left output pixel. */
  if (
    w_12_16 + w_12_8 + w_6_10 + w_6_2 + (4 * w_11_7) <
    w_11_17 + w_11_5 + w_7_13 + w_7_1 + (4 * w_12_6)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_7 ? 11 : 7),12,result,0,
      channels);
  else
    CopyPixels(pixels,12,result,0,channels);
  /* Top-right output pixel. */
  if (
    w_12_18 + w_12_6 + w_8_14 + w_8_2 + (4 * w_7_13) <
    w_13_17 + w_13_9 + w_11_7 + w_7_3 + (4 * w_12_8)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_7 <= w_12_13 ? 7 : 13),12,result,1,
      channels);
  else
    CopyPixels(pixels,12,result,1,channels);
  /* Bottom-left output pixel. */
  if (
    w_12_6 + w_12_18 + w_16_10 + w_16_22 + (4 * w_11_17) <
    w_11_7 + w_11_15 + w_13_17 + w_17_21 + (4 * w_12_16)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_17 ? 11 : 17),12,result,2,
      channels);
  else
    CopyPixels(pixels,12,result,2,channels);
  /* Bottom-right output pixel. */
  if (
    w_12_8 + w_12_16 + w_18_14 + w_18_22 + (4 * w_13_17) <
    w_11_17 + w_17_23 + w_17_19 + w_7_13 + (4 * w_12_18)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_13 <= w_12_17 ? 13 : 17),12,result,3,
      channels);
  else
    CopyPixels(pixels,12,result,3,channels);
}
static void Scale2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  /*
    Scale2x (AdvMAME2x) pixel-art scaler: pixels is a 3x3 neighborhood in
    row-major order (center at index 4); result receives the 2x2 block.
    When the vertical or horizontal neighbor pairs match, no edge can be
    inferred and the center is replicated; otherwise each corner takes the
    shared neighbor color when its two adjacent edge neighbors agree.
  */
  int
    left_right,
    top_bottom;

  ssize_t
    i;

  (void) source;
  top_bottom=PixelsEqual(pixels,1,pixels,7,channels);
  left_right=PixelsEqual(pixels,3,pixels,5,channels);
  if (top_bottom || left_right)
    {
      for (i=0; i < 4; i++)
        CopyPixels(pixels,4,result,i,channels);
      return;
    }
  CopyPixels(pixels,PixelsEqual(pixels,1,pixels,3,channels) ? 3 : 4,result,0,
    channels);
  CopyPixels(pixels,PixelsEqual(pixels,1,pixels,5,channels) ? 5 : 4,result,1,
    channels);
  CopyPixels(pixels,PixelsEqual(pixels,3,pixels,7,channels) ? 3 : 4,result,2,
    channels);
  CopyPixels(pixels,PixelsEqual(pixels,5,pixels,7,channels) ? 5 : 4,result,3,
    channels);
}
/*
  EPX-B 2x pixel-art scaler.  pixels is a 3x3 neighborhood (center at index
  4); result receives the 2x2 block, initialized to the center and then
  selectively blended along detected diagonal edges.
*/
static void Epbx2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /*
    True when pixels a and b match AND at least one supporting neighbor
    confirms the edge direction for the corner being blended.
  */
#define HelperCond(a,b,c,d,e,f,g) ( \
  PixelsEqual(pixels,a,pixels,b,channels) && ( \
    PixelsEqual(pixels,c,pixels,d,channels) || \
    PixelsEqual(pixels,c,pixels,e,channels) || \
    PixelsEqual(pixels,a,pixels,f,channels) || \
    PixelsEqual(pixels,b,pixels,g,channels) \
    ) \
  )

  register ssize_t
    i;

  /* Default all four outputs to the center pixel. */
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  /* Blend only when neither axis pair matches (a genuine diagonal edge). */
  if (
    !PixelsEqual(pixels,3,pixels,5,channels) &&
    !PixelsEqual(pixels,1,pixels,7,channels) &&
    (
      PixelsEqual(pixels,4,pixels,3,channels) ||
      PixelsEqual(pixels,4,pixels,7,channels) ||
      PixelsEqual(pixels,4,pixels,5,channels) ||
      PixelsEqual(pixels,4,pixels,1,channels) ||
      (
        (
          !PixelsEqual(pixels,0,pixels,8,channels) ||
          PixelsEqual(pixels,4,pixels,6,channels) ||
          PixelsEqual(pixels,3,pixels,2,channels)
        ) &&
        (
          !PixelsEqual(pixels,6,pixels,2,channels) ||
          PixelsEqual(pixels,4,pixels,0,channels) ||
          PixelsEqual(pixels,4,pixels,8,channels)
        )
      )
    )
  )
    {
      if (HelperCond(1,3,4,0,8,2,6))
        Mix2Pixels(pixels,1,3,result,0,channels);
      if (HelperCond(5,1,4,2,6,8,0))
        Mix2Pixels(pixels,5,1,result,1,channels);
      if (HelperCond(3,7,4,6,2,0,8))
        Mix2Pixels(pixels,3,7,result,2,channels);
      if (HelperCond(7,5,4,8,0,6,2))
        Mix2Pixels(pixels,7,5,result,3,channels);
    }
#undef HelperCond
}
/*
  Eagle3X() magnifies the 3x3 source neighborhood (center at index 4) into
  a 3x3 result block with the Eagle algorithm: an output corner copies its
  diagonal source neighbor when the three source pixels around that corner
  agree; edges blend two matched corners; everything else copies the
  center.  'source' is unused; it keeps the shared method signature.
*/
static inline void Eagle3X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    bl,
    br,
    tl,
    tr;

  /*
    A corner "matches" when its diagonal neighbor and the two pixels
    adjacent to it are all equal.
  */
  tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  /*
    Corners: copy the matched neighbor, otherwise the center pixel.
  */
  CopyPixels(pixels,tl ? (ssize_t) 0 : (ssize_t) 4,result,0,channels);
  CopyPixels(pixels,tr ? (ssize_t) 1 : (ssize_t) 4,result,2,channels);
  CopyPixels(pixels,bl ? (ssize_t) 3 : (ssize_t) 4,result,6,channels);
  CopyPixels(pixels,br ? (ssize_t) 5 : (ssize_t) 4,result,8,channels);
  /*
    Edges: blend the two adjacent matched corners, otherwise copy the
    center.
  */
  if (tl && tr)
    Mix2Pixels(pixels,0,2,result,1,channels);
  else
    CopyPixels(pixels,4,result,1,channels);
  if (tl && bl)
    Mix2Pixels(pixels,0,6,result,3,channels);
  else
    CopyPixels(pixels,4,result,3,channels);
  if (tr && br)
    Mix2Pixels(pixels,2,8,result,5,channels);
  else
    CopyPixels(pixels,4,result,5,channels);
  if (bl && br)
    Mix2Pixels(pixels,6,8,result,7,channels);
  else
    CopyPixels(pixels,4,result,7,channels);
  /*
    The output center is always the source center.
  */
  CopyPixels(pixels,4,result,4,channels);
}
/*
  Eagle3XB() is the "B" variant of Eagle3X: only the four output corners
  may copy a matched diagonal neighbor; every other output cell (edges and
  center) always replicates the source center pixel.  'source' is unused;
  it keeps the shared method signature.
*/
static inline void Eagle3XB(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  register ssize_t
    i;

  ssize_t
    bl,
    br,
    tl,
    tr;

  /*
    A corner "matches" when its diagonal neighbor and the two pixels
    adjacent to it are all equal.
  */
  tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  /*
    Start with the center pixel everywhere, then overwrite the corners
    whose neighborhoods matched.
  */
  for (i=0; i < 9; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (tl)
    CopyPixels(pixels,0,result,0,channels);
  if (tr)
    CopyPixels(pixels,1,result,2,channels);
  if (bl)
    CopyPixels(pixels,3,result,6,channels);
  if (br)
    CopyPixels(pixels,5,result,8,channels);
}
/*
  Scale3X() magnifies the 3x3 source neighborhood (center at index 4) into
  a 3x3 result block with the Scale3x algorithm: output cells copy an edge
  neighbor when the surrounding edge conditions indicate a diagonal line,
  otherwise they replicate the center.  'source' is unused; it keeps the
  shared method signature.
*/
static inline void Scale3X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  register ssize_t
    i;

  /*
    Start with the center pixel everywhere; each cell below is only
    overwritten when its edge condition holds.
  */
  for (i=0; i < 9; i++)
    CopyPixels(pixels,4,result,i,channels);
  /*
    Degenerate case: when either opposing neighbor pair matches there is
    no diagonal edge to sharpen, so the block stays solid center.
  */
  if (PixelsEqual(pixels,1,pixels,7,channels) ||
      PixelsEqual(pixels,3,pixels,5,channels))
    return;
  /*
    Corners copy the neighbor shared by two matching edges; edge cells
    additionally require the opposite diagonal to differ from the center.
  */
  if (PixelsEqual(pixels,3,pixels,1,channels))
    CopyPixels(pixels,3,result,0,channels);
  if ((PixelsEqual(pixels,3,pixels,1,channels) &&
       !PixelsEqual(pixels,4,pixels,2,channels)) ||
      (PixelsEqual(pixels,5,pixels,1,channels) &&
       !PixelsEqual(pixels,4,pixels,0,channels)))
    CopyPixels(pixels,1,result,1,channels);
  if (PixelsEqual(pixels,5,pixels,1,channels))
    CopyPixels(pixels,5,result,2,channels);
  if ((PixelsEqual(pixels,3,pixels,1,channels) &&
       !PixelsEqual(pixels,4,pixels,6,channels)) ||
      (PixelsEqual(pixels,3,pixels,7,channels) &&
       !PixelsEqual(pixels,4,pixels,0,channels)))
    CopyPixels(pixels,3,result,3,channels);
  if ((PixelsEqual(pixels,5,pixels,1,channels) &&
       !PixelsEqual(pixels,4,pixels,8,channels)) ||
      (PixelsEqual(pixels,5,pixels,7,channels) &&
       !PixelsEqual(pixels,4,pixels,2,channels)))
    CopyPixels(pixels,5,result,5,channels);
  if (PixelsEqual(pixels,3,pixels,7,channels))
    CopyPixels(pixels,3,result,6,channels);
  if ((PixelsEqual(pixels,3,pixels,7,channels) &&
       !PixelsEqual(pixels,4,pixels,8,channels)) ||
      (PixelsEqual(pixels,5,pixels,7,channels) &&
       !PixelsEqual(pixels,4,pixels,6,channels)))
    CopyPixels(pixels,7,result,7,channels);
  if (PixelsEqual(pixels,5,pixels,7,channels))
    CopyPixels(pixels,5,result,8,channels);
}
/*
  MagnifyImage() doubles or triples the size of an image with one of the
  pixel-art scaling algorithms (scale2x, scale3x, eagle, epbx, fish, hqx,
  xbr), selected via the "magnify:method" image option.  Fixes vs. the
  previous revision: the CloneImage() result for the working copy is now
  NULL-checked, the working copy is no longer leaked when the magnified
  clone fails, and the virtual pixel pointer is checked before use.

  Returns the magnified image, or NULL on failure (errors are reported in
  'exception').
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag  "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  const char
    *option;

  Image
    *source_image,
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    rectangle;

  ssize_t
    y;

  unsigned char
    magnification,
    width;

  void
    (*scaling_method)(const Image *,const Quantum *,Quantum *,size_t);

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  option=GetImageOption(image->image_info,"magnify:method");
  if (option == (char *) NULL)
    option="scale2x";
  /*
    Select the scaling method; 'width' is the square source window (3x3 or
    5x5) each method samples around the current pixel.
  */
  scaling_method=Scale2X;
  magnification=1;
  width=1;
  switch (*option)
  {
    case 'e':
    {
      if (LocaleCompare(option,"eagle2x") == 0)
        {
          scaling_method=Eagle2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3x") == 0)
        {
          scaling_method=Eagle3X;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3xb") == 0)
        {
          scaling_method=Eagle3XB;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"epbx2x") == 0)
        {
          scaling_method=Epbx2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'f':
    {
      if (LocaleCompare(option,"fish2x") == 0)
        {
          scaling_method=Fish2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'h':
    {
      if (LocaleCompare(option,"hq2x") == 0)
        {
          scaling_method=Hq2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 's':
    {
      if (LocaleCompare(option,"scale2x") == 0)
        {
          scaling_method=Scale2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"scale3x") == 0)
        {
          scaling_method=Scale3X;
          magnification=3;
          width=3;
          break;
        }
      break;
    }
    case 'x':
    {
      if (LocaleCompare(option,"xbr2x") == 0)
        {
          scaling_method=Xbr2X;
          magnification=2;
          width=5;
        }
      break;
    }
    default:
      break;
  }
  /*
    Make a working copy of the source image and convert it to RGB
    colorspace.
  */
  source_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (source_image == (Image *) NULL)
    return((Image *) NULL);
  offset.x=0;
  offset.y=0;
  rectangle.x=0;
  rectangle.y=0;
  rectangle.width=image->columns;
  rectangle.height=image->rows;
  (void) CopyImagePixels(source_image,image,&rectangle,&offset,exception);
  (void) SetImageColorspace(source_image,RGBColorspace,exception);
  magnify_image=CloneImage(source_image,magnification*source_image->columns,
    magnification*source_image->rows,MagickTrue,exception);
  if (magnify_image == (Image *) NULL)
    {
      /*
        Previously the working copy leaked on this path.
      */
      source_image=DestroyImage(source_image);
      return((Image *) NULL);
    }
  /*
    Magnify the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(source_image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,magnify_image,source_image->rows,1)
#endif
  for (y=0; y < (ssize_t) source_image->rows; y++)
  {
    Quantum
      r[128];  /* result pixels; NOTE(review): assumes
                  magnification^2*channels <= 128 -- verify against
                  MaxPixelChannels */

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(magnify_view,0,magnification*y,
      magnify_image->columns,magnification,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) source_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      size_t
        channels;

      register ssize_t
        i;

      ssize_t
        j;

      /*
        Fetch the width x width window centered on (x,y); the virtual
        cache view supplies pixels beyond the image edges.
      */
      p=GetCacheViewVirtualPixels(image_view,x-width/2,y-width/2,width,width,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      channels=GetPixelChannels(source_image);
      scaling_method(source_image,p,r,channels);
      /*
        Copy the result block (magnification x magnification pixels) into
        the final image.
      */
      for (j=0; j < (ssize_t) magnification; j++)
        for (i=0; i < (ssize_t) (channels*magnification); i++)
          q[j*channels*magnify_image->columns+i]=r[j*magnification*channels+i];
      q+=magnification*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MagnifyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  source_image=DestroyImage(source_image);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MinifyImage() scales an image proportionally to half its size; it is a
  convenience wrapper around ResizeImage() with a spline filter.  Returns
  the minified image, or NULL on failure.
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Delegate directly to ResizeImage() at half the dimensions.
  */
  return(ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resizes an image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real-world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterType filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ResampleImage() resizes the image so that, displayed at the requested
  resolution, it covers the same real-world size as the original did at
  its own resolution.  The new resolution is recorded on the result.
  Returns the resampled image, or NULL on failure.
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  double
    density_x,
    density_y;

  Image
    *resample_image;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Fall back to 72 DPI when the image carries no resolution of its own.
  */
  density_x=image->resolution.x == 0.0 ? 72.0 : image->resolution.x;
  density_y=image->resolution.y == 0.0 ? 72.0 : image->resolution.y;
  resample_image=ResizeImage(image,(size_t) (x_resolution*image->columns/
    density_x+0.5),(size_t) (y_resolution*image->rows/density_y+0.5),filter,
    exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given, the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged.  Otherwise the filter defaults to Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
% const FilterType filter,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  A single filter tap: the weight applied to one source pixel when
  computing a resized pixel (used by HorizontalFilter/VerticalFilter).
*/
typedef struct _ContributionInfo
{
  double
    weight;  /* filter weight for this source pixel */

  ssize_t
    pixel;   /* source column (horizontal pass) or row (vertical pass) */
} ContributionInfo;
/*
  DestroyContributionThreadSet() releases the per-thread contribution
  buffers and the table holding them.  Always returns NULL so callers can
  write contribution=DestroyContributionThreadSet(contribution).
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  register ssize_t
    n;

  assert(contribution != (ContributionInfo **) NULL);
  /*
    Free every per-thread aligned buffer, then the pointer table itself.
  */
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (contribution[n] != (ContributionInfo *) NULL)
      contribution[n]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[n]);
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
/*
  AcquireContributionThreadSet() allocates one aligned buffer of 'count'
  ContributionInfo entries per worker thread.  Returns the table of
  buffers, or NULL when any allocation fails (partial allocations are
  released before returning).
*/
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  ContributionInfo
    **contribution;

  register ssize_t
    n;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  /*
    Zero the table first so a failed partial allocation can be torn down
    safely by DestroyContributionThreadSet().
  */
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    contribution[n]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[n] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}
/*
  HorizontalFilter() resizes 'image' horizontally into 'resize_image' by
  convolving each output column with the given resize filter.  'x_factor'
  is the output/input width ratio, 'span' the total pixel budget for
  progress reporting, and 'progress' a shared counter advanced once per
  output column.  Returns MagickTrue on success.
*/
static MagickBooleanType HorizontalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double x_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.  When
    minifying, the filter is scaled up so it covers the wider source
    footprint of each output pixel.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Map output column x to its center in source coordinates and clamp
      the filter window [start,stop) to the image bounds.
    */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    /*
      Compute filter weights for each source column in the window.
    */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      Read the contributing source columns (all rows) and queue one output
      column.
    */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy channel (or masked pixel): take the nearest source
              pixel to the window center instead of filtering.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each contribution by the source alpha and
          renormalize by the accumulated alpha (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  VerticalFilter() resizes 'image' vertically into 'resize_image' by
  convolving each output row with the given resize filter.  'y_factor' is
  the output/input height ratio, 'span' the total pixel budget for
  progress reporting, and 'progress' a shared counter advanced once per
  output row.  Mirrors HorizontalFilter() with rows and columns swapped.
  Returns MagickTrue on success.
*/
static MagickBooleanType VerticalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double y_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.  When
    minifying, the filter is scaled up so it covers the taller source
    footprint of each output pixel.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Map output row y to its center in source coordinates and clamp the
      filter window [start,stop) to the image bounds.
    */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    /*
      Compute filter weights for each source row in the window.
    */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      Read the contributing source rows (all columns) and queue one output
      row.
    */
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy channel (or masked pixel): take the nearest source
              pixel to the window center instead of filtering.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each contribution by the source alpha and
          renormalize by the accumulated alpha (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  ResizeImage() scales an image to the desired dimensions with the given
  filter, running a horizontal and a vertical filter pass in whichever
  order touches fewer pixels.  Returns the resized image, or NULL on
  failure (errors are reported in 'exception').
*/
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;

  FilterType
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    offset;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Acquire resize filter.  Default is Lanczos; a same-size resize becomes
    point sampling, and colormapped, alpha, or enlarged images default to
    Mitchell.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->alpha_trait != UndefinedPixelTrait) ||
          ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the GPU-accelerated path first; fall through on failure.
  */
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
#endif
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
  /*
    The intermediate image holds the result of the first pass: resized in
    one dimension, original in the other.
  */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(DestroyImage(resize_image));  /* DestroyImage() returns NULL */
    }
  /*
    Resize image: run the pass with the larger factor first to keep the
    intermediate image (and total work) small.
  */
  offset=0;
  if (x_factor > y_factor)
    {
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
        span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SampleImage() scales an image to the desired dimensions with pixel
  sampling; no new colors are introduced.  The sample point within each
  region defaults to the mid-point and can be overridden with the
  "sample:offset" artifact (percent, rho[/sigma]).  Fixes vs. the previous
  revision: the redundant duplicate ParseGeometry() call is removed, and
  the shared progress counter is now incremented under '#pragma omp
  atomic' (matching the other parallel loops in this file) instead of a
  racy post-increment.

  Returns the sampled image, or NULL on failure.
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag  "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    x1;

  ssize_t
    *x_offset,
    y;

  PointInfo
    sample_offset;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Set the sampling offset, default is in the mid-point of sample regions.
  */
  sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
  {
    const char
      *value;

    value=GetImageArtifact(image,"sample:offset");
    if (value != (char *) NULL)
      {
        GeometryInfo
          geometry_info;

        MagickStatusType
          flags;

        /*
          Parse the artifact once; offsets are given as percentages.
        */
        flags=ParseGeometry(value,&geometry_info);
        sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
        if ((flags & SigmaValue) != 0)
          sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
      }
  }
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the source column for every destination column.
  */
  for (x1=0; x1 < (ssize_t) sample_image->columns; x1++)
    x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(sample_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          image_traits,
          traits;

        channel=GetPixelChannelChannel(sample_image,i);
        traits=GetPixelChannelTraits(sample_image,channel);
        image_traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (image_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
          image)+i],q);
      }
      q+=GetPixelChannels(sample_image);
    }
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SampleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"
CacheView
*image_view,
*scale_view;
double
alpha,
pixel[CompositePixelChannel],
*scale_scanline,
*scanline,
*x_vector,
*y_vector;
Image
*scale_image;
MagickBooleanType
next_column,
next_row,
proceed,
status;
PixelTrait
scale_traits;
PointInfo
scale,
span;
register ssize_t
i;
ssize_t
n,
number_rows,
y;
/*
Initialize scaled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (scale_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
{
scale_image=DestroyImage(scale_image);
return((Image *) NULL);
}
/*
Allocate memory.
*/
x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
MaxPixelChannels*sizeof(*x_vector));
/* When no vertical scaling is needed, x_vector doubles as the scanline
buffer; otherwise a separate scanline buffer is acquired below. */
scanline=x_vector;
if (image->rows != scale_image->rows)
scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
MaxPixelChannels*sizeof(*scanline));
scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
MaxPixelChannels*sizeof(*scale_scanline));
y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
MaxPixelChannels*sizeof(*y_vector));
if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
(x_vector == (double *) NULL) || (y_vector == (double *) NULL))
{
/* Release whichever buffers were acquired before failing. */
if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
scanline=(double *) RelinquishMagickMemory(scanline);
if (scale_scanline != (double *) NULL)
scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
if (x_vector != (double *) NULL)
x_vector=(double *) RelinquishMagickMemory(x_vector);
if (y_vector != (double *) NULL)
y_vector=(double *) RelinquishMagickMemory(y_vector);
scale_image=DestroyImage(scale_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Scale image.  Each output row is built by accumulating weighted spans of
input rows (y_vector), then each output column by accumulating weighted
spans of the scanline (scale_scanline).
*/
number_rows=0;
next_row=MagickTrue;
span.y=1.0;
scale.y=(double) scale_image->rows/(double) image->rows;
(void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns*
sizeof(*y_vector));
n=0;
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
scale_view=AcquireAuthenticCacheView(scale_image,exception);
for (y=0; y < (ssize_t) scale_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
break;
q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
/* alpha stays 1.0 when the image has no alpha channel */
alpha=1.0;
if (scale_image->rows == image->rows)
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
alpha=QuantumScale*GetPixelAlpha(image,p);
/* Premultiply blendable channels by alpha so averages weight
correctly; non-blend channels are copied verbatim. */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
}
else
{
/*
Scale Y direction.
*/
while (scale.y < span.y)
{
if ((next_row != MagickFalse) &&
(number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
alpha=QuantumScale*GetPixelAlpha(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
number_rows++;
}
/* Accumulate this input row's weighted contribution. */
for (x=0; x < (ssize_t) image->columns; x++)
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
y_vector[x*GetPixelChannels(image)+i]+=scale.y*
x_vector[x*GetPixelChannels(image)+i];
span.y-=scale.y;
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
alpha=QuantumScale*GetPixelAlpha(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
number_rows++;
next_row=MagickFalse;
}
/* Combine fully accumulated rows with the final partial row, then
reset the accumulator for the next output row. */
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
x_vector[x*GetPixelChannels(image)+i];
scanline[x*GetPixelChannels(image)+i]=pixel[i];
y_vector[x*GetPixelChannels(image)+i]=0.0;
}
}
scale.y-=span.y;
if (scale.y <= 0)
{
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
span.y=1.0;
}
if (scale_image->columns == image->columns)
{
/*
Transfer scanline to scaled image.
*/
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(scale_image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
/* Un-premultiply; PerceptibleReciprocal() guards alpha ~= 0. */
alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
GetPixelChannelOffset(image,AlphaPixelChannel)];
alpha=PerceptibleReciprocal(alpha);
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
scale_traits=GetPixelChannelTraits(scale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(scale_traits == UndefinedPixelTrait))
continue;
if ((traits & BlendPixelTrait) == 0)
{
SetPixelChannel(scale_image,channel,ClampToQuantum(
scanline[x*GetPixelChannels(image)+i]),q);
continue;
}
SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
x*GetPixelChannels(image)+i]),q);
}
q+=GetPixelChannels(scale_image);
}
}
else
{
ssize_t
t;
/*
Scale X direction.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
next_column=MagickFalse;
span.x=1.0;
t=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
scale.x=(double) scale_image->columns/(double) image->columns;
while (scale.x >= span.x)
{
if (next_column != MagickFalse)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
t++;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
}
scale.x-=span.x;
span.x=1.0;
next_column=MagickTrue;
}
if (scale.x > 0)
{
if (next_column != MagickFalse)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
next_column=MagickFalse;
t++;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
span.x-=scale.x;
}
}
/* Flush any remaining span into the last output column. */
if (span.x > 0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
}
if ((next_column == MagickFalse) && (t < (ssize_t) scale_image->columns))
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
/*
Transfer scanline to scaled image.
*/
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(scale_image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
GetPixelChannelOffset(image,AlphaPixelChannel)];
alpha=PerceptibleReciprocal(alpha);
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
scale_traits=GetPixelChannelTraits(scale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(scale_traits == UndefinedPixelTrait))
continue;
if ((traits & BlendPixelTrait) == 0)
{
SetPixelChannel(scale_image,channel,ClampToQuantum(
scale_scanline[x*GetPixelChannels(image)+i]),q);
continue;
}
SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
scale_scanline[x*GetPixelChannels(image)+i]),q);
}
q+=GetPixelChannels(scale_image);
}
}
if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
{
status=MagickFalse;
break;
}
proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
scale_view=DestroyCacheView(scale_view);
image_view=DestroyCacheView(image_view);
/*
Free allocated memory.
*/
y_vector=(double *) RelinquishMagickMemory(y_vector);
scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
if (scale_image->rows != image->rows)
scanline=(double *) RelinquishMagickMemory(scanline);
x_vector=(double *) RelinquishMagickMemory(x_vector);
scale_image->type=image->type;
if (status == MagickFalse)
scale_image=DestroyImage(scale_image);
return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5

  char
    filename[MagickPathExtent],
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    For large shrink factors, pre-shrink with the fast SampleImage() before
    the high-quality ResizeImage() pass; otherwise resize directly.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Annotate the thumbnail with freedesktop.org-style Thumb::* properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  GetPathComponent(image->magick_filename,TailPath,filename);
  (void) CopyMagickString(value,filename,MagickPathExtent);
  /*
    Only report a modification time when the file attributes are available.
    Previously st_mtime was also formatted unconditionally after this block,
    reading uninitialized memory whenever GetPathAttributes() failed; that
    (dead) formatting has been removed.
  */
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value,
    exception);
  return(thumbnail_image);
}
|
SybasePROP_fmt_plug.c | /* SybasePROP cracker. Hacked together during November of 2013 by Dhiru Kholia
* <dhiru [at] openwall.com>.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* Frank Benhamou, Gregory Terrien and Marcel Major and it is hereby released
* to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credits for reversing this algorithm go to Marcel Major, Frank Benhamou
* and Gregory Terrien. Dhiru Kholia just glued together the bits (as usual!).
*
* [1] http://www.nes.fr/securitylab/?p=1128 (in French!)
*
* [2] https://hacktivity.com/hu/letoltesek/archivum/57/
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sybaseprop;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sybaseprop);
#else
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "syb-prop_repro.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 2048 // xxx
static int omp_t = 1;
#endif
#include "memdbg.h"
#define BLOCK_SIZE 8
#define FORMAT_LABEL "Sybase-PROP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "salted FEAL-8 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH (6 + 56)
#define PREFIX_VALUE "0x"
#define PREFIX_LENGTH 2
#define BINARY_SIZE 56 / 2
#define BINARY_ALIGN 4
#define SALT_SIZE 1 // see the definition of generate_hash, note "unsigned char seed" argument
#define SALT_SIZE_HEX 2
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests SybasePROP_tests[] = {
{"0x2905aeb3d00e3b80fb0695cb34c9fa9080f84ae1824b24cc51a3849dcb06", "test11"},
{"0x3f05fc3d526946d9936c63dd798c5fa1b980747b1d81d0b9b2e8197d2aca", "test12"},
{NULL}
};
static unsigned char saved_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* One-time format setup: scale the keys-per-crypt parameters for OpenMP
 * and allocate the key/result buffers sized for the maximum batch. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(self->params.max_keys_per_crypt *
	                            sizeof(*saved_key), MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(self->params.max_keys_per_crypt *
	                            sizeof(*crypt_out), MEM_ALIGN_WORD);
}
/* Accept only hashes of the form "0x" + 2 hex salt digits + hex digest,
 * with the exact expected total length and hex characters throughout. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;

	if (strncmp(ciphertext, PREFIX_VALUE, PREFIX_LENGTH) != 0)
		return 0;
	if (strlen(ciphertext) != CIPHERTEXT_LENGTH)
		return 0;
	for (p = ciphertext + PREFIX_LENGTH; *p; p++)
		if (atoi16[ARCH_INDEX(*p)] == 0x7f)
			return 0;
	return 1;
}
/* Decode the hex digest portion of the ciphertext into a static binary
 * buffer (aligned via the union).  The prefix, the 2-digit salt and the
 * constant "05" pair are skipped before decoding. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *hex = ciphertext + PREFIX_LENGTH + SALT_SIZE_HEX + 2; // last 2 bytes always seem to be "05"
	int i;

	for (i = 0; i < BINARY_SIZE; i++, hex += 2)
		out[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		          atoi16[ARCH_INDEX(hex[1])];
	return out;
}
/* Extract the one-byte salt from the two hex digits right after "0x". */
static void *get_salt(char *ciphertext)
{
	static unsigned char salt;
	char *hex = ciphertext + PREFIX_LENGTH;

	salt = (atoi16[ARCH_INDEX(hex[0])] << 4) | atoi16[ARCH_INDEX(hex[1])];
	return (void*)&salt;
}
/* Install the current one-byte salt for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	saved_salt = *(unsigned char*)salt;
}
/* Store a candidate plaintext at `index`, truncated to PLAINTEXT_LENGTH
 * and always NUL-terminated. */
static void set_key(char *key, int index)
{
	size_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored candidate at `index` (NUL-terminated by set_key). */
static char *get_key(int index)
{
return saved_key[index];
}
/* Hash every queued candidate with the salted FEAL-8 scheme.
 *
 * Fix: the original placed the `for` statement itself inside
 * `#ifdef _OPENMP`, so non-OpenMP builds executed the body exactly once
 * (index 0) and never hashed the remaining candidates.  Only the pragma
 * belongs in the conditional; the loop must always run. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		int g_seed = 0x3f;            /* fixed PRNG seed used by the scheme */
		struct JtR_FEAL8_CTX ctx;     /* per-candidate cipher context */

		generate_hash((unsigned char*)saved_key[index], saved_salt,
		              (unsigned char*)crypt_out[index], &g_seed, &ctx);
	}
	return count;
}
/* Return 1 if any computed hash in the batch matches `binary`. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (memcmp(binary, crypt_out[i], BINARY_SIZE) == 0)
			return 1;
	return 0;
}
/* Exact binary comparison against the one hash at `index`. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* All BINARY_SIZE bytes were already compared in cmp_one(), so there is
nothing further to verify here. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Hash-table lookup helpers: low-order bits of the first 32-bit word of
the computed hash, one mask per supported table size. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* John the Ripper format descriptor: wires the Sybase-PROP parameters and
method callbacks into the framework (field order defined in formats.h). */
struct fmt_main fmt_sybaseprop = {
{
/* format parameters */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
SybasePROP_tests
}, {
/* method callbacks */
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
jik_optimize.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int A_row;  /* rows of A, from argv[1] */
int A_col;  /* cols of A, from argv[2] */
int B_row;  /* rows of B, from argv[3] */
int B_col;  /* cols of B, from argv[4] */
/* Allocate a row-by-col matrix of ints as an array of row pointers.
 * Returns NULL (after freeing any partial allocation) if memory runs
 * out; the original dereferenced NULL later in that case. */
int **constructMatrix(int row, int col){
    int **matrix = (int **)malloc(sizeof(int *) * row);
    if (matrix == NULL)
        return NULL;
    for (int i = 0; i < row; i++){
        matrix[i] = (int *)malloc(sizeof(int) * col);
        if (matrix[i] == NULL){
            for (int j = 0; j < i; j++)
                free(matrix[j]);
            free(matrix);
            return NULL;
        }
    }
    return matrix;
}
/* Release a matrix allocated by constructMatrix.  `col` is unused but
 * kept for interface compatibility with callers. */
void freeMatrix(int **matrix, int row, int col){
    int r = 0;
    while (r < row){
        free(matrix[r]);
        ++r;
    }
    free(matrix);
}
/* Multiply A (A_row x A_col) by B (B_row x B_col) read from file "matrix",
 * timing the jik-ordered OpenMP loop and writing C to "jik_optimize_result".
 * Usage: prog A_row A_col B_row B_col number_of_threads */
int main(int argc, char *argv[]){
    if (argc < 6){
        fprintf(stderr, "usage: %s A_row A_col B_row B_col number_of_threads\n", argv[0]);
        return 1;
    }
    A_row = atoi(argv[1]);
    A_col = atoi(argv[2]);
    B_row = atoi(argv[3]);
    B_col = atoi(argv[4]);
    int number_of_threads = atoi(argv[5]);
    /* inner dimensions must agree for A*B to be defined */
    if (A_row <= 0 || A_col <= 0 || B_row <= 0 || B_col <= 0 ||
        A_col != B_row || number_of_threads <= 0){
        fprintf(stderr, "invalid matrix dimensions or thread count\n");
        return 1;
    }
    FILE *input = fopen("matrix", "r");
    if (input == NULL){
        perror("matrix");
        return 1;
    }
    int **A = constructMatrix(A_row, A_col);
    int **B = constructMatrix(B_row, B_col);
    int **C = constructMatrix(A_row, B_col);
    if (A == NULL || B == NULL || C == NULL){
        fprintf(stderr, "out of memory\n");
        fclose(input);
        return 1;
    }
    //read A
    for (int i = 0; i < A_row; i++){
        for (int j = 0; j < A_col; j++){
            if (fscanf(input, "%d", &A[i][j]) != 1){
                fprintf(stderr, "matrix: truncated input\n");
                fclose(input);
                return 1;
            }
        }
    }
    //read B
    for (int i = 0; i < B_row; i++){
        for (int j = 0; j < B_col; j++){
            if (fscanf(input, "%d", &B[i][j]) != 1){
                fprintf(stderr, "matrix: truncated input\n");
                fclose(input);
                return 1;
            }
        }
    }
    fclose(input);
    double start_time = omp_get_wtime();
    //multiply (jik order); each thread owns distinct (i,j) cells of C
    int i, j, k;
    int sum;
    #pragma omp parallel for shared(A,B,C) private(i,j,k,sum) num_threads(number_of_threads)
    for (j = 0; j < B_col; j++){
        for (i = 0; i < A_row; i++){
            sum = 0;
            for (k = 0; k < A_col; k++){
                sum += A[i][k] * B[k][j];
            }
            C[i][j] = sum;
        }
    }
    double end_time = omp_get_wtime();
    printf("%s: %g sec.\n", "jik_optimize_runtime", end_time - start_time);
    //output the result to compare with golden result
    FILE *out = fopen("jik_optimize_result", "w");
    if (out == NULL){
        perror("jik_optimize_result");
        return 1;
    }
    for (int i = 0; i < A_row; i++){
        for (int j = 0; j < B_col; j++){
            fprintf(out, "%d ", C[i][j]);
        }
        fprintf(out, "\n");
    }
    fprintf(out, "\n");
    fclose(out);
    freeMatrix(A, A_row, A_col);
    freeMatrix(B, B_row, B_col);
    freeMatrix(C, A_row, B_col);
    return 0;
}
eavlSourceTopologyGatherMapOp.h | // Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_SOURCE_TOPOLOGY_GATHER_MAP_OP_H
#define EAVL_SOURCE_TOPOLOGY_GATHER_MAP_OP_H
#include "eavlCUDA.h"
#include "eavlCellSet.h"
#include "eavlCellSetExplicit.h"
#include "eavlCellSetAllStructured.h"
#include "eavlDataSet.h"
#include "eavlArray.h"
#include "eavlOpDispatch.h"
#include "eavlOperation.h"
#include "eavlTopology.h"
#include "eavlException.h"
#include <time.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#ifndef DOXYGEN
// CPU dispatch target: for every dense output position, gather the sparse
// source-cell index via the index array, look up that element's
// connectivity, and apply the functor to the source-topology inputs.
template <class CONN>
struct eavlSourceTopologyGatherMapOp_CPU
{
static inline eavlArray::Location location() { return eavlArray::HOST; }
template <class F, class IN, class OUT, class INDEX>
static void call(int nitems, CONN &conn,
const IN s_inputs, OUT outputs,
INDEX indices, F &functor)
{
// first index array maps dense output positions -> sparse source cells
int *sparseindices = get<0>(indices).array;

int ids[MAX_LOCAL_TOPOLOGY_IDS]; // these are effectively our src indices
// ids is per-iteration scratch, so it must be private to each thread
#pragma omp parallel for private(ids)
for (int denseindex = 0; denseindex < nitems; ++denseindex)
{
int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
int nids;
int shapeType = conn.GetElementComponents(sparseindex, nids, ids);
typename collecttype<OUT>::type out(collect(denseindex, outputs));
out = functor(shapeType, nids, ids, s_inputs);
}
}
};
#if defined __CUDACC__
// CUDA kernel: same gather-and-map as the CPU path, using a grid-stride
// loop so any grid size covers all nitems outputs.
template <class CONN, class F, class IN, class OUT, class INDEX>
__global__ void
eavlSourceTopologyGatherMapOp_kernel(int nitems, CONN conn,
const IN s_inputs, OUT outputs,
INDEX indices, F functor)
{
int *sparseindices = get<0>(indices).array;

const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int ids[MAX_LOCAL_TOPOLOGY_IDS];
for (int denseindex = threadID; denseindex < nitems; denseindex += numThreads)
{
int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
int nids;
int shapeType = conn.GetElementComponents(sparseindex, nids, ids);
collect(denseindex, outputs) = functor(shapeType, nids, ids, s_inputs);
}
}

// GPU dispatch target: launches the kernel with a fixed 32x256 launch
// configuration and checks for CUDA errors afterwards.
template <class CONN>
struct eavlSourceTopologyGatherMapOp_GPU
{
static inline eavlArray::Location location() { return eavlArray::DEVICE; }
template <class F, class IN, class OUT, class INDEX>
static void call(int nitems, CONN &conn,
const IN s_inputs, OUT outputs,
INDEX indices, F &functor)
{
int numThreads = 256;
dim3 threads(numThreads, 1, 1);
dim3 blocks (32, 1, 1);
eavlSourceTopologyGatherMapOp_kernel<<< blocks, threads >>>(nitems, conn,
s_inputs, outputs,
indices, functor);
CUDA_CHECK_ERROR();
}
};

#endif

#endif
// ****************************************************************************
// Class: eavlSourceTopologyGatherMapOp
//
// Purpose:
/// Map from one topological element in a mesh to another, with
/// input arrays on the source topology (at sparsely indexed locations as
/// specific by the index array) and with outputs on the destination
/// topology (and densely indexed locations 0 to n-1).
//
// Programmer: Jeremy Meredith
// Creation: August 1, 2013
//
// Modifications:
// ****************************************************************************
template <class IS, class O, class INDEX, class F>
class eavlSourceTopologyGatherMapOp : public eavlOperation
{
protected:
eavlCellSet *cells;      // cell set supplying the connectivity
eavlTopology topology;   // which topological mapping to traverse
IS s_inputs;             // inputs on the source topology (sparse)
O outputs;               // outputs on the destination topology (dense)
INDEX indices;           // dense -> sparse index array
F functor;               // user operation applied per element
public:
eavlSourceTopologyGatherMapOp(eavlCellSet *c, eavlTopology t,
IS is, O o, INDEX ind, F f)
: cells(c), topology(t), s_inputs(is), outputs(o), indices(ind), functor(f)
{
}
// Dispatch on the concrete cell-set type (explicit vs. all-structured)
// and run the CPU implementation over the dense output range.
virtual void GoCPU()
{
eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
int n = outputs.first.length();
if (elExp)
{
eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
eavlOpDispatch<eavlSourceTopologyGatherMapOp_CPU<eavlExplicitConnectivity> >(n, conn, s_inputs, outputs, indices, functor);
}
else if (elStr)
{
eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
eavlOpDispatch<eavlSourceTopologyGatherMapOp_CPU<eavlRegularConnectivity> >(n, conn, s_inputs, outputs, indices, functor);
}
}
// Same dispatch for the GPU; NeedOnDevice/NeedOnHost bracket the call so
// the explicit-connectivity arrays are transferred to and from the device.
virtual void GoGPU()
{
#ifdef HAVE_CUDA
eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
int n = outputs.first.length();
if (elExp)
{
eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);

conn.shapetype.NeedOnDevice();
conn.connectivity.NeedOnDevice();
conn.mapCellToIndex.NeedOnDevice();

eavlOpDispatch<eavlSourceTopologyGatherMapOp_GPU<eavlExplicitConnectivity> >(n, conn, s_inputs, outputs, indices, functor);

conn.shapetype.NeedOnHost();
conn.connectivity.NeedOnHost();
conn.mapCellToIndex.NeedOnHost();
}
else if (elStr)
{
eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
eavlOpDispatch<eavlSourceTopologyGatherMapOp_GPU<eavlRegularConnectivity> >(n, conn, s_inputs, outputs, indices, functor);
}
#else
THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
}
};
// helper function for type deduction: constructs the op on the heap so
// callers need not spell out the template arguments
template <class IS, class O, class INDEX, class F>
eavlSourceTopologyGatherMapOp<IS,O,INDEX,F> *new_eavlSourceTopologyGatherMapOp(eavlCellSet *c, eavlTopology t,
IS is, O o, INDEX indices, F f)
{
return new eavlSourceTopologyGatherMapOp<IS,O,INDEX,F>(c,t,is,o,indices,f);
}
#endif
|
collatzGuided.c | // test file to execute the collatz conjecture on 1 proc
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
typedef unsigned long long ullong;
ullong hotpo(ullong currn);
/* Search seeds 1..nmax for the highest value reached by any Collatz
 * orbit, in parallel with guided scheduling and a max-reduction. */
int main(int argc, char** argv) {
    ullong high,   // highest value reached by any orbit
           nmax = (argc > 1) ? strtoull(argv[1], NULL, 10) : 50,  // atoi truncated large seeds to int
           imax = 2000000;  // max number of iterations per seed

    #pragma omp parallel
    {
        printf("worker %d/%d ready to roll\n", omp_get_thread_num(), omp_get_num_threads());
    }

    high = 0;
    /* timers */
    double startTime = omp_get_wtime(),
           endTime;

    /* `n` is declared inside the loop body so each thread has its own
       copy; the previous file-scope-of-main shared `n` was written
       concurrently by every thread (a data race). */
    #pragma omp parallel for schedule(guided, 50) reduction(max:high)
    for(ullong j = 1; j <= nmax; ++j) {
        ullong n = j;
        for(ullong i = 1; i <= imax; ++i) {
            n = hotpo(n);
            if(n > high) high = n;
            if( n == 1 ) break; // stop once the orbit reaches 1
        }
    }

    /* %llu: high is unsigned long long (the old %lld was UB) */
    printf("\nHigh: %llu\n", high);
    endTime = omp_get_wtime();
    printf("\nruntime = %.16e\n", endTime - startTime);
    return 0;
}
/* One HOTPO ("half or triple plus one") Collatz step: n/2 for even n,
 * 3n+1 for odd n. */
ullong hotpo(ullong currn) {
    if (currn % 2 == 0)
        return currn / 2;
    return 3 * currn + 1;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.